Dataset columns (all strings):

hip_filename: string (lengths 5 to 84)
hip_content: string (lengths 79 to 9.69M)
cuda_filename: string (lengths 4 to 83)
cuda_content: string (lengths 19 to 9.69M)
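Each row pairs a hipify-generated HIP translation with the CUDA source it was produced from. As a reading aid only, one row could be modeled by the following C++ struct; the struct and its name are illustrative, not part of the dataset:

#include <string>

// --- One dataset row (hypothetical model, for illustration only).
struct HipCudaPair {
    std::string hip_filename;   // 5 to 84 characters
    std::string hip_content;    // 79 characters to 9.69M
    std::string cuda_filename;  // 4 to 83 characters
    std::string cuda_content;   // 19 characters to 9.69M
};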
aa143ce115b9a4f0728132f8be3bceb8d2e35489.hip
// !!! This is a file automatically generated by hipify!!!
// CUDA Implementation of the 2D wave equation
#include <math.h>

#include <hip/hip_runtime.h>

#include "InputOutput.h"
#include "Utilities.cuh"
#include "Matlab_like.cuh"

#define BLOCKSIZEX 16
#define BLOCKSIZEY 16

#define DEBUG

/***********************************/
/* HOST-SIDE FIELD UPDATE FUNCTION */
/***********************************/
void updateHost(const double * __restrict__ h_uold, const double * __restrict__ h_u, double * __restrict__ h_unew,
                const double c, const double q, const double r, const int Nx, const int Ny) {

    for (int j = 1; j < Ny - 1; j++)
        for (int i = 1; i < Nx - 1; i++)
            h_unew[j * Nx + i] = q * h_u[j * Nx + i] + r * h_uold[j * Nx + i] +
                                 c * (h_u[j * Nx + i - 1] + h_u[j * Nx + i + 1] +
                                      h_u[(j + 1) * Nx + i] + h_u[(j - 1) * Nx + i] - 4. * h_u[j * Nx + i]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - NO SHARED MEMORY */
/********************************************************/
__global__ void updateDevice_v0(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    const int tidy = threadIdx.y + blockIdx.y * blockDim.y;

    if ((tidx >= Nx - 1) || (tidx == 0) || (tidy >= Ny - 1) || (tidy == 0)) return;

    d_unew[tidy * Nx + tidx] = q * d_u[tidy * Nx + tidx] + r * d_uold[tidy * Nx + tidx] +
                               c * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] +
                                    d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. * d_u[tidy * Nx + tidx]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - SHARED MEMORY v1 */
/********************************************************/
__global__ void updateDevice_v1(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    const int tidy = threadIdx.y + blockIdx.y * blockDim.y;

    if ((tidx >= Nx) || (tidy >= Ny)) return;

    __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY];

    // --- Load data to shared memory. Halo regions are NOT loaded.
    d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx];
    __syncthreads();

    if ((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY - 1)) &&
        (tidx < Nx - 1) && (tidy < Ny - 1))
        // --- If we do not need halo region elements, then use shared memory.
        d_unew[tidy * Nx + tidx] = q * d_u_sh[threadIdx.x][threadIdx.y] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] +
                                        d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. * d_u_sh[threadIdx.x][threadIdx.y]);
    else if (tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1)  // --- Only update "interior" (not boundary) node points
        // --- If we need halo region elements, then use global memory.
        d_unew[tidy * Nx + tidx] = q * d_u[tidy * Nx + tidx] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] +
                                        d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. * d_u[tidy * Nx + tidx]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - SHARED MEMORY v2 */
/********************************************************/
__global__ void updateDevice_v2(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = blockIdx.x * (BLOCKSIZEX - 2) + threadIdx.x;
    const int tidy = blockIdx.y * (BLOCKSIZEY - 2) + threadIdx.y;

    if ((tidx >= Nx) || (tidy >= Ny)) return;

    __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY];

    // --- Load data to shared memory. Halo regions ARE loaded.
    d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx];
    __syncthreads();

    if (((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY - 1))) &&
        (tidx < Nx - 1 && tidy < Ny - 1))
        d_unew[tidy * Nx + tidx] = q * d_u_sh[threadIdx.x][threadIdx.y] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] +
                                        d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. * d_u_sh[threadIdx.x][threadIdx.y]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - SHARED MEMORY v3 */
/********************************************************/
__global__ void updateDevice_v3(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y;

    if ((tidx >= Nx) || (tidy >= Ny)) return;

    const int tid_block = threadIdx.y * BLOCKSIZEX + threadIdx.x;  // --- Flattened thread index within a block

    const int tidx1 = tid_block % (BLOCKSIZEX + 2);
    const int tidy1 = tid_block / (BLOCKSIZEY + 2);

    const int tidx2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) % (BLOCKSIZEX + 2);
    const int tidy2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) / (BLOCKSIZEY + 2);

    __shared__ double d_u_sh[BLOCKSIZEX + 2][BLOCKSIZEY + 2];

    if (((blockIdx.x * BLOCKSIZEX - 1 + tidx1) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx1) >= 0) &&
        ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) >= 0))
        d_u_sh[tidx1][tidy1] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx1) + (blockIdx.y * BLOCKSIZEY - 1 + tidy1) * Nx];

    if (((tidx2 < (BLOCKSIZEX + 2)) && (tidy2 < (BLOCKSIZEY + 2))) &&
        ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) >= 0) &&
        ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) >= 0))
        d_u_sh[tidx2][tidy2] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx2) + (blockIdx.y * BLOCKSIZEY - 1 + tidy2) * Nx];

    __syncthreads();

    if ((tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1))
        d_unew[tidy * Nx + tidx] = q * d_u_sh[threadIdx.x + 1][threadIdx.y + 1] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u_sh[threadIdx.x][threadIdx.y + 1] + d_u_sh[threadIdx.x + 2][threadIdx.y + 1] +
                                        d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y + 2] - 4. * d_u_sh[threadIdx.x + 1][threadIdx.y + 1]);
}

/********/
/* MAIN */
/********/
int main() {

    const int Nx = 512;        // --- Number of mesh points along x
    const int Ny = 512;        // --- Number of mesh points along y

    const double Lx = 200.;    // --- Length of the domain along x
    const double Ly = 200.;    // --- Length of the domain along y

    double *h_x = h_linspace(0., Lx, Nx);  // --- Mesh points along x
    double *h_y = h_linspace(0., Ly, Ny);  // --- Mesh points along y

    const double dx = h_x[2] - h_x[1];     // --- Mesh step along x
    const double dy = h_y[2] - h_y[1];     // --- Mesh step along y

    const double v = 5.;       // --- Wave speed
    const double p = 0.02;     // --- Wave decay factor

    const double dt = 0.25 / (v * sqrt((1. / dx) * (1. / dx) + (1. / dy) * (1. / dy)));  // --- Time step matching the Courant-Friedrichs-Lewy condition

    const int T = floor((3. * sqrt(Lx * Lx + Ly * Ly) / v) / dt);  // --- Total number of time steps

    double *h_u    = (double *)calloc(Nx * Ny, sizeof(double));   // --- Current solution u(x, y, t) - host
    double *h_uold = (double *)calloc(Nx * Ny, sizeof(double));   // --- Solution at the previous step - host
    double *h_unew = (double *)calloc(Nx * Ny, sizeof(double));   // --- Solution at the next step - host

    double *d_u;    gpuErrchk(hipMalloc((void**)&d_u,    Nx * Ny * sizeof(double)));  // --- Current solution u(x, y, t) - device
    double *d_uold; gpuErrchk(hipMalloc((void**)&d_uold, Nx * Ny * sizeof(double)));  // --- Solution at the previous step - device
    double *d_unew; gpuErrchk(hipMalloc((void**)&d_unew, Nx * Ny * sizeof(double)));  // --- Solution at the next step - device

    gpuErrchk(hipMemset(d_unew, 0, Nx * Ny * sizeof(double)));

    // --- Initial conditions
    const int indxc = floor(Nx / 3) - 1;   // --- Index for the source location along x
    const int indyc = floor(Ny / 2) - 1;   // --- Index for the source location along y
    const double xc = h_x[indxc];          // --- x coordinate of the source
    const double yc = h_y[indyc];          // --- y coordinate of the source
    const int indRc = 50;
    const double Rc = Lx / indRc;

    for (int j = 0; j < Ny; j++)
        for (int i = 0; i < Nx; i++) {
            if (sqrt((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) <= Rc)
                h_u[j * Nx + i] = exp(-indRc * ((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) / Lx);
            h_uold[j * Nx + i] = h_u[j * Nx + i];
        }

    // --- Transferring the initial condition from host to device
    gpuErrchk(hipMemcpy(d_uold, h_uold, Nx * Ny * sizeof(double), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_u,    h_u,    Nx * Ny * sizeof(double), hipMemcpyHostToDevice));

    /*********************/
    /* ITERATIONS - HOST */
    /*********************/
    const double c = dt * dt * v * v / (dx * dx);  // --- CFL number
    const double q = 2. - p * dt;
    const double r = -1. + p * dt;

    for (int tt = 0; tt < T; tt++) {
        updateHost(h_uold, h_u, h_unew, c, q, r, Nx, Ny);
        h_uold = h_u;     // --- Current solution becomes old
        h_u    = h_unew;  // --- New solution becomes current
        h_unew = h_uold;
    }

    /***********************/
    /* ITERATIONS - DEVICE */
    /***********************/
    // --- For the cases of no shared memory and shared memory v1 and v3
    dim3 Grid(iDivUp(Nx, BLOCKSIZEX), iDivUp(Ny, BLOCKSIZEY));
    dim3 Block(BLOCKSIZEX, BLOCKSIZEY);

    // --- For the case of shared memory v2 only
    //dim3 Grid(iDivUp(Nx, BLOCKSIZEX - 2), iDivUp(Ny, BLOCKSIZEY - 2));
    //dim3 Block(BLOCKSIZEX, BLOCKSIZEY);

    for (int tt = 0; tt < T; tt++) {
        //updateDevice_v0<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
        //updateDevice_v1<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
        //updateDevice_v2<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
        updateDevice_v3<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
#ifdef DEBUG
        gpuErrchk(hipPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
#endif
        d_uold = d_u;     // --- Current solution becomes old
        d_u    = d_unew;  // --- New solution becomes current
        d_unew = d_uold;
    }

    saveCPUrealtxt(h_u, "D:\\Project\\FDTD\\FDTD_2D_Acoustics\\FDTD2D_hostResult.txt", Nx * Ny);

    double *h_uDevice = (double *)malloc(Nx * Ny * sizeof(double));
    gpuErrchk(hipMemcpy(h_uDevice, d_u, Nx * Ny * sizeof(double), hipMemcpyDeviceToHost));
    saveCPUrealtxt(h_uDevice, "D:\\Project\\FDTD\\FDTD_2D_Acoustics\\FDTD2D_deviceResult.txt", Nx * Ny);

    return 0;
}
aa143ce115b9a4f0728132f8be3bceb8d2e35489.cu
// CUDA Implementation of the 2D wave equation
#include <math.h>

#include <cuda.h>

#include "InputOutput.h"
#include "Utilities.cuh"
#include "Matlab_like.cuh"

#define BLOCKSIZEX 16
#define BLOCKSIZEY 16

#define DEBUG

/***********************************/
/* HOST-SIDE FIELD UPDATE FUNCTION */
/***********************************/
void updateHost(const double * __restrict__ h_uold, const double * __restrict__ h_u, double * __restrict__ h_unew,
                const double c, const double q, const double r, const int Nx, const int Ny) {

    for (int j = 1; j < Ny - 1; j++)
        for (int i = 1; i < Nx - 1; i++)
            h_unew[j * Nx + i] = q * h_u[j * Nx + i] + r * h_uold[j * Nx + i] +
                                 c * (h_u[j * Nx + i - 1] + h_u[j * Nx + i + 1] +
                                      h_u[(j + 1) * Nx + i] + h_u[(j - 1) * Nx + i] - 4. * h_u[j * Nx + i]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - NO SHARED MEMORY */
/********************************************************/
__global__ void updateDevice_v0(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    const int tidy = threadIdx.y + blockIdx.y * blockDim.y;

    if ((tidx >= Nx - 1) || (tidx == 0) || (tidy >= Ny - 1) || (tidy == 0)) return;

    d_unew[tidy * Nx + tidx] = q * d_u[tidy * Nx + tidx] + r * d_uold[tidy * Nx + tidx] +
                               c * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] +
                                    d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. * d_u[tidy * Nx + tidx]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - SHARED MEMORY v1 */
/********************************************************/
__global__ void updateDevice_v1(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    const int tidy = threadIdx.y + blockIdx.y * blockDim.y;

    if ((tidx >= Nx) || (tidy >= Ny)) return;

    __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY];

    // --- Load data to shared memory. Halo regions are NOT loaded.
    d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx];
    __syncthreads();

    if ((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY - 1)) &&
        (tidx < Nx - 1) && (tidy < Ny - 1))
        // --- If we do not need halo region elements, then use shared memory.
        d_unew[tidy * Nx + tidx] = q * d_u_sh[threadIdx.x][threadIdx.y] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] +
                                        d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. * d_u_sh[threadIdx.x][threadIdx.y]);
    else if (tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1)  // --- Only update "interior" (not boundary) node points
        // --- If we need halo region elements, then use global memory.
        d_unew[tidy * Nx + tidx] = q * d_u[tidy * Nx + tidx] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u[tidy * Nx + tidx - 1] + d_u[tidy * Nx + tidx + 1] +
                                        d_u[(tidy + 1) * Nx + tidx] + d_u[(tidy - 1) * Nx + tidx] - 4. * d_u[tidy * Nx + tidx]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - SHARED MEMORY v2 */
/********************************************************/
__global__ void updateDevice_v2(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = blockIdx.x * (BLOCKSIZEX - 2) + threadIdx.x;
    const int tidy = blockIdx.y * (BLOCKSIZEY - 2) + threadIdx.y;

    if ((tidx >= Nx) || (tidy >= Ny)) return;

    __shared__ double d_u_sh[BLOCKSIZEX][BLOCKSIZEY];

    // --- Load data to shared memory. Halo regions ARE loaded.
    d_u_sh[threadIdx.x][threadIdx.y] = d_u[tidy * Nx + tidx];
    __syncthreads();

    if (((threadIdx.x > 0) && (threadIdx.x < (BLOCKSIZEX - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCKSIZEY - 1))) &&
        (tidx < Nx - 1 && tidy < Ny - 1))
        d_unew[tidy * Nx + tidx] = q * d_u_sh[threadIdx.x][threadIdx.y] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u_sh[threadIdx.x - 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y] +
                                        d_u_sh[threadIdx.x][threadIdx.y - 1] + d_u_sh[threadIdx.x][threadIdx.y + 1] - 4. * d_u_sh[threadIdx.x][threadIdx.y]);
}

/********************************************************/
/* DEVICE-SIDE FIELD UPDATE FUNCTION - SHARED MEMORY v3 */
/********************************************************/
__global__ void updateDevice_v3(const double * __restrict__ d_uold, const double * __restrict__ d_u, double * __restrict__ d_unew,
                                const double c, const double q, const double r, const int Nx, const int Ny) {

    const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y;

    if ((tidx >= Nx) || (tidy >= Ny)) return;

    const int tid_block = threadIdx.y * BLOCKSIZEX + threadIdx.x;  // --- Flattened thread index within a block

    const int tidx1 = tid_block % (BLOCKSIZEX + 2);
    const int tidy1 = tid_block / (BLOCKSIZEY + 2);

    const int tidx2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) % (BLOCKSIZEX + 2);
    const int tidy2 = (BLOCKSIZEX * BLOCKSIZEY + tid_block) / (BLOCKSIZEY + 2);

    __shared__ double d_u_sh[BLOCKSIZEX + 2][BLOCKSIZEY + 2];

    if (((blockIdx.x * BLOCKSIZEX - 1 + tidx1) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx1) >= 0) &&
        ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy1) >= 0))
        d_u_sh[tidx1][tidy1] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx1) + (blockIdx.y * BLOCKSIZEY - 1 + tidy1) * Nx];

    if (((tidx2 < (BLOCKSIZEX + 2)) && (tidy2 < (BLOCKSIZEY + 2))) &&
        ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) < Nx) && ((blockIdx.x * BLOCKSIZEX - 1 + tidx2) >= 0) &&
        ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) < Ny) && ((blockIdx.y * BLOCKSIZEY - 1 + tidy2) >= 0))
        d_u_sh[tidx2][tidy2] = d_u[(blockIdx.x * BLOCKSIZEX - 1 + tidx2) + (blockIdx.y * BLOCKSIZEY - 1 + tidy2) * Nx];

    __syncthreads();

    if ((tidx > 0 && tidx < Nx - 1 && tidy > 0 && tidy < Ny - 1))
        d_unew[tidy * Nx + tidx] = q * d_u_sh[threadIdx.x + 1][threadIdx.y + 1] + r * d_uold[tidy * Nx + tidx] +
                                   c * (d_u_sh[threadIdx.x][threadIdx.y + 1] + d_u_sh[threadIdx.x + 2][threadIdx.y + 1] +
                                        d_u_sh[threadIdx.x + 1][threadIdx.y] + d_u_sh[threadIdx.x + 1][threadIdx.y + 2] - 4. * d_u_sh[threadIdx.x + 1][threadIdx.y + 1]);
}

/********/
/* MAIN */
/********/
int main() {

    const int Nx = 512;        // --- Number of mesh points along x
    const int Ny = 512;        // --- Number of mesh points along y

    const double Lx = 200.;    // --- Length of the domain along x
    const double Ly = 200.;    // --- Length of the domain along y

    double *h_x = h_linspace(0., Lx, Nx);  // --- Mesh points along x
    double *h_y = h_linspace(0., Ly, Ny);  // --- Mesh points along y

    const double dx = h_x[2] - h_x[1];     // --- Mesh step along x
    const double dy = h_y[2] - h_y[1];     // --- Mesh step along y

    const double v = 5.;       // --- Wave speed
    const double p = 0.02;     // --- Wave decay factor

    const double dt = 0.25 / (v * sqrt((1. / dx) * (1. / dx) + (1. / dy) * (1. / dy)));  // --- Time step matching the Courant-Friedrichs-Lewy condition

    const int T = floor((3. * sqrt(Lx * Lx + Ly * Ly) / v) / dt);  // --- Total number of time steps

    double *h_u    = (double *)calloc(Nx * Ny, sizeof(double));   // --- Current solution u(x, y, t) - host
    double *h_uold = (double *)calloc(Nx * Ny, sizeof(double));   // --- Solution at the previous step - host
    double *h_unew = (double *)calloc(Nx * Ny, sizeof(double));   // --- Solution at the next step - host

    double *d_u;    gpuErrchk(cudaMalloc((void**)&d_u,    Nx * Ny * sizeof(double)));  // --- Current solution u(x, y, t) - device
    double *d_uold; gpuErrchk(cudaMalloc((void**)&d_uold, Nx * Ny * sizeof(double)));  // --- Solution at the previous step - device
    double *d_unew; gpuErrchk(cudaMalloc((void**)&d_unew, Nx * Ny * sizeof(double)));  // --- Solution at the next step - device

    gpuErrchk(cudaMemset(d_unew, 0, Nx * Ny * sizeof(double)));

    // --- Initial conditions
    const int indxc = floor(Nx / 3) - 1;   // --- Index for the source location along x
    const int indyc = floor(Ny / 2) - 1;   // --- Index for the source location along y
    const double xc = h_x[indxc];          // --- x coordinate of the source
    const double yc = h_y[indyc];          // --- y coordinate of the source
    const int indRc = 50;
    const double Rc = Lx / indRc;

    for (int j = 0; j < Ny; j++)
        for (int i = 0; i < Nx; i++) {
            if (sqrt((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) <= Rc)
                h_u[j * Nx + i] = exp(-indRc * ((h_x[i] - xc) * (h_x[i] - xc) + (h_y[j] - yc) * (h_y[j] - yc)) / Lx);
            h_uold[j * Nx + i] = h_u[j * Nx + i];
        }

    // --- Transferring the initial condition from host to device
    gpuErrchk(cudaMemcpy(d_uold, h_uold, Nx * Ny * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_u,    h_u,    Nx * Ny * sizeof(double), cudaMemcpyHostToDevice));

    /*********************/
    /* ITERATIONS - HOST */
    /*********************/
    const double c = dt * dt * v * v / (dx * dx);  // --- CFL number
    const double q = 2. - p * dt;
    const double r = -1. + p * dt;

    for (int tt = 0; tt < T; tt++) {
        updateHost(h_uold, h_u, h_unew, c, q, r, Nx, Ny);
        h_uold = h_u;     // --- Current solution becomes old
        h_u    = h_unew;  // --- New solution becomes current
        h_unew = h_uold;
    }

    /***********************/
    /* ITERATIONS - DEVICE */
    /***********************/
    // --- For the cases of no shared memory and shared memory v1 and v3
    dim3 Grid(iDivUp(Nx, BLOCKSIZEX), iDivUp(Ny, BLOCKSIZEY));
    dim3 Block(BLOCKSIZEX, BLOCKSIZEY);

    // --- For the case of shared memory v2 only
    //dim3 Grid(iDivUp(Nx, BLOCKSIZEX - 2), iDivUp(Ny, BLOCKSIZEY - 2));
    //dim3 Block(BLOCKSIZEX, BLOCKSIZEY);

    for (int tt = 0; tt < T; tt++) {
        //updateDevice_v0<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
        //updateDevice_v1<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
        //updateDevice_v2<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
        updateDevice_v3<<<Grid, Block>>>(d_uold, d_u, d_unew, c, q, r, Nx, Ny);
#ifdef DEBUG
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
#endif
        d_uold = d_u;     // --- Current solution becomes old
        d_u    = d_unew;  // --- New solution becomes current
        d_unew = d_uold;
    }

    saveCPUrealtxt(h_u, "D:\\Project\\FDTD\\FDTD_2D_Acoustics\\FDTD2D_hostResult.txt", Nx * Ny);

    double *h_uDevice = (double *)malloc(Nx * Ny * sizeof(double));
    gpuErrchk(cudaMemcpy(h_uDevice, d_u, Nx * Ny * sizeof(double), cudaMemcpyDeviceToHost));
    saveCPUrealtxt(h_uDevice, "D:\\Project\\FDTD\\FDTD_2D_Acoustics\\FDTD2D_deviceResult.txt", Nx * Ny);

    return 0;
}
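A note on the commented-out launch configuration in the pair above: updateDevice_v2 maps blocks with a stride of BLOCKSIZE - 2 because every block loads a full BLOCKSIZEX x BLOCKSIZEY tile into shared memory but writes only the tile interior, so neighboring tiles must overlap by the one-cell halo on each side. A minimal sketch, assuming iDivUp is the usual ceiling division declared in Utilities.cuh (which is not included in this row); the _sketch suffix marks it as a hypothetical stand-in:

// --- Hypothetical stand-in for the iDivUp helper from Utilities.cuh.
inline int iDivUp_sketch(int a, int b) { return (a + b - 1) / b; }

// --- Launch configuration for updateDevice_v2: the grid is sized on the
// --- (BLOCKSIZE - 2)-wide tile interior, not the full block width used
// --- by v0, v1 and v3, so adjacent tiles overlap by two cells per axis.
// dim3 Grid(iDivUp_sketch(Nx, BLOCKSIZEX - 2), iDivUp_sketch(Ny, BLOCKSIZEY - 2));
// dim3 Block(BLOCKSIZEX, BLOCKSIZEY);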
6be20814ea63cd4535486fa025e28d36da8c5376.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaBVH.cuh"
#include "../Core/CudaUtils.cuh"

#include <thrust/host_vector.h>

#include <unordered_map>
#include <set>

void CudaBVH::GetAABBCollisions(const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights,
    const thrust::device_vector<float3>& AABBMins, const thrust::device_vector<float3>& AABBMaxs,
    const thrust::device_vector<int>& rmlls, const thrust::device_vector<int>& rmlrs,
    thrust::device_vector<Physics::AABBCollision> &collisions, thrust::device_vector<bool> &flags,
    const int chunkSize, const uint64_t timestamp) {

    int leafCount = lefts.size() + 1;
    int numBlocks = (leafCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK;

    _GetAABBCollisions<<<numBlocks, CudaUtils::THREADS_PER_BLOCK>>>(leafCount, cu::raw(lefts), cu::raw(rights),
        cu::raw(AABBMins), cu::raw(AABBMaxs), cu::raw(rmlls), cu::raw(rmlrs), cu::raw(collisions), cu::raw(flags),
        chunkSize, timestamp);

    //printf("Done with AABB cols\n");
}

__global__ void CudaBVH::_GetAABBCollisions(const int leafCount, const int *__restrict__ lefts, const int *__restrict__ rights,
    const float3 *__restrict__ aabbMins, const float3 *__restrict__ aabbMaxs,
    const int *__restrict__ rmlls, const int *__restrict__ rmlrs,
    Physics::AABBCollision * __restrict__ collisions, bool * __restrict__ flags,
    const int chunkSize, const uint64_t timestamp) {

    int id = CudaUtils::MyID();
    if (id >= leafCount)
        return;

    const int internalNodeCount = leafCount - 1;
    const int myLeafID = internalNodeCount + id;
    const int myChunk = chunkSize * id;

    int crtCol = 0;

    int nodeStack[64];
    nodeStack[0] = -1;
    int stackTop = 1;

    int crtNode = 0; // root

    do {
        //printf("[%d] crtNode: %d\n", id, crtNode);
        int left = lefts[crtNode];
        int right = rights[crtNode];

        bool overlapL = false;
        bool overlapR = false;

        if (rmlls[crtNode] > myLeafID) {
            if (CudaUtils::AABBOverlap(aabbMins[myLeafID], aabbMaxs[myLeafID], aabbMins[left], aabbMaxs[left]))
                overlapL = true;
        }

        if (rmlrs[crtNode] > myLeafID) {
            if (CudaUtils::AABBOverlap(aabbMins[myLeafID], aabbMaxs[myLeafID], aabbMins[right], aabbMaxs[right]))
                overlapR = true;
        }

        if (overlapL && left >= internalNodeCount) {
            if (crtCol < chunkSize) {
                collisions[myChunk + crtCol] = Physics::AABBCollision(id, left - internalNodeCount);// , timestamp);
                flags[myChunk + crtCol++] = true;
                //printf("[%d] Collision between %d and %d\n", id, myLeafID, left - internalNodeCount);
            } else {
                //printf("[%d] Out of space, myLeaf: %d, lleaf #: %d, crtCol: %d\n", id, myLeafID, left - internalNodeCount, crtCol++);
            }
        }

        if (overlapR && right >= internalNodeCount) {
            if (crtCol < chunkSize) {
                collisions[myChunk + crtCol] = Physics::AABBCollision(id, right - internalNodeCount);// , timestamp);
                flags[myChunk + crtCol++] = true;
                //printf("[%d] Collision between %d and %d\n", id, myLeafID, right - internalNodeCount);
            } else {
                //printf("[%d] Out of space, myLeaf: %d, rleaf #: %d, crtCol: %d\n", id, myLeafID, right - internalNodeCount, crtCol++);
            }
        }

        bool traverseL = (overlapL && left < internalNodeCount);
        bool traverseR = (overlapR && right < internalNodeCount);

        if (!traverseL && !traverseR) {
            crtNode = nodeStack[--stackTop];
        } else {
            crtNode = traverseL ? left : right;
            if (traverseL && traverseR)
                nodeStack[stackTop++] = right;
        }
    } while (stackTop != 0);

    //AABBCollisionSizes[id] = crtCol;
}

void CudaBVH::ComputeTreeAABBs(const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights,
    const thrust::device_vector<int>& parents, thrust::device_vector<int>& nodesVisited,
    thrust::device_vector<float3> &AABBMins, thrust::device_vector<float3> &AABBMaxs,
    thrust::device_vector<int> &rmlls, thrust::device_vector<int> &rmlrs) {

    int leafCount = lefts.size() + 1;
    int numBlocks = (leafCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK;

    hipLaunchKernelGGL((ComputeTreeAABBs), dim3(numBlocks), dim3(CudaUtils::THREADS_PER_BLOCK), 0, 0, leafCount,
        thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]),
        thrust::raw_pointer_cast(&nodesVisited[0]), thrust::raw_pointer_cast(&AABBMins[0]), thrust::raw_pointer_cast(&AABBMaxs[0]),
        thrust::raw_pointer_cast(&rmlls[0]), thrust::raw_pointer_cast(&rmlrs[0]));
}

__global__ void CudaBVH::ComputeTreeAABBs(const int leafNodeCount, const int *__restrict__ lefts, const int *__restrict__ rights,
    const int *__restrict__ parents, int *__restrict__ nodesVisited,
    float3 *__restrict__ AABBMins, float3 *__restrict__ AABBMaxs,
    int * __restrict__ rmlls, int * __restrict__ rmlrs) {

    int id = CudaUtils::MyID();
    if (id >= leafNodeCount)
        return;

    for (int pID = parents[leafNodeCount - 1 + id]; atomicExch(&nodesVisited[pID], 1) == 1; pID = parents[pID]) {
        //printf("[%d] Parent: %d\n", pID, parents[pID]);
        AABBMins[pID] = CudaUtils::minf3(AABBMins[lefts[pID]], AABBMins[rights[pID]]);
        AABBMaxs[pID] = CudaUtils::maxf3(AABBMaxs[lefts[pID]], AABBMaxs[rights[pID]]);

        rmlls[pID] = lefts[pID] >= leafNodeCount - 1 ? lefts[pID] : max(rmlls[lefts[pID]], rmlrs[lefts[pID]]);
        rmlrs[pID] = rights[pID] >= leafNodeCount - 1 ? rights[pID] : max(rmlls[rights[pID]], rmlrs[rights[pID]]);

        if (pID == 0)
            return;
    }
}

void CudaBVH::GenerateBVH2(const thrust::device_vector<uint64_t>& sortedMortons,
    thrust::device_vector<int>& lefts, thrust::device_vector<int>& rights, thrust::device_vector<int>& parents,
    thrust::device_vector<int>& nodesVisited) {

    const int internalNodeCount = sortedMortons.size() - 1;
    int numBlocks = (internalNodeCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK;

    _GenerateBVH2<<<numBlocks, CudaUtils::THREADS_PER_BLOCK>>>(internalNodeCount, thrust::raw_pointer_cast(&sortedMortons[0]),
        thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]),
        thrust::raw_pointer_cast(&nodesVisited[0]));
}

__global__ void CudaBVH::_GenerateBVH2(const int internalNodeCount, const uint64_t *__restrict__ sortedMortons,
    int *__restrict__ lefts, int *__restrict__ rights, int *__restrict__ parents, int *__restrict__ nodesVisited) {

    int id = CudaUtils::MyID();
    if (id >= internalNodeCount)
        return;

    nodesVisited[id] = 0;

    int2 range = FindRange(sortedMortons, internalNodeCount, id);

    int first = range.x;
    int last = range.y;

    int split = FindSplit(sortedMortons, first, last);

    int leftID = split == first ? internalNodeCount + split : split;
    int rightID = split + 1 == last ? internalNodeCount + split + 1 : split + 1;

    //printf("[%d] First: %d, Last: %d, Split: %d\n", id, first, last, split);
    //printf("[%d] Left: %d, Right: %d\n", id, leftID, rightID);

    lefts[id] = leftID;
    rights[id] = rightID;

    parents[leftID] = id;
    parents[rightID] = id;
}

__device__ int2 CudaBVH::FindRange(const uint64_t *__restrict__ sortedMortons, const int count, const int id) {
    int d = CudaUtils::sgn(CudaUtils::Delta(sortedMortons, id, id + 1, count) - CudaUtils::Delta(sortedMortons, id, id - 1, count));
    int delMin = CudaUtils::Delta(sortedMortons, id, id - d, count);

    int lMax = 2;
    while (CudaUtils::Delta(sortedMortons, id, id + lMax * d, count) > delMin)
        lMax <<= 1;

    //printf("\t[%d] d: %d, dLeft: %d, dRight: %d\n", id, d, CudaUtils::Delta(sortedMortons, id, id - 1, count), CudaUtils::Delta(sortedMortons, id, id + 1, count));

    int l = 0;
    for (int t = lMax / 2; t >= 1; t /= 2) {
        if (CudaUtils::Delta(sortedMortons, id, id + (l + t) * d, count) > delMin)
            l += t;
    }

    int j = id + l * d;

    return id <= j ? make_int2(id, j) : make_int2(j, id);
}

__device__ int CudaBVH::FindSplit(const uint64_t *__restrict__ sortedMortons, const int first, const int last) {
    uint64_t firstCode = sortedMortons[first];
    uint64_t lastCode = sortedMortons[last];

    if (firstCode == lastCode)
        return (first + last) >> 1;

    int comPref = __clzll(firstCode ^ lastCode);

    int split = first;
    int step = last - first;

    do {
        step = (step + 1) >> 1;
        int newSplit = split + step;

        if (newSplit < last) {
            uint64_t splitCode = sortedMortons[newSplit];
            int splitPref = __clzll(firstCode ^ splitCode);
            if (splitPref > comPref)
                split = newSplit;
        }
    } while (step > 1);

    return split;
}

void CudaBVH::BVHTestUnit(thrust::device_vector<uint64_t>& sortedMortons, const thrust::device_vector<int>& lefts,
    const thrust::device_vector<int>& rights, const thrust::device_vector<int>& parents,
    const thrust::device_vector<int>& nodesVisited, const thrust::device_vector<float3>& AABBMins,
    const thrust::device_vector<float3>& AABBMaxs) {

    thrust::host_vector<int> hLefts = lefts;
    thrust::host_vector<int> hRights = rights;
    thrust::host_vector<int> hParents = parents;
    thrust::host_vector<int> hNodesVisited = nodesVisited;
    thrust::host_vector<float3> hAABBmins = AABBMins;
    thrust::host_vector<float3> hAABBmaxs = AABBMaxs;
    thrust::host_vector<uint64_t> hMortons = sortedMortons;

    const int leafCount = hMortons.size();
    const int internalCount = leafCount - 1;

    std::unordered_map<uint64_t, int> mortonDuplicates;

    for (int i = 0; i < hMortons.size() - 1; ++i) {
        //std::cout << "mortons[" << i << "]: " << hMortons[i] << std::endl;
        mortonDuplicates[hMortons[i]]++;
        if (hMortons[i] > hMortons[i + 1]) {
            std::cout << "Morton codes NOT OK" << std::endl;
        }
    }

    for (auto kvPair : mortonDuplicates) {
        if (kvPair.second != 1) {
            // std::cout << "DUPLICATE MORTONS\n";
            break;
        }
    }

    std::unordered_map<int, int> parentLinkCount;
    for (auto parent : hParents) {
        parentLinkCount[parent]++;
    }

    for (auto kvPair : parentLinkCount) {
        if ((kvPair.first == 0 && kvPair.second != 3) || (kvPair.first > 0 && kvPair.second != 2)) {
            std::cout << "Parent links NOT OK" << std::endl;
            break;
        }
        if (kvPair.first >= internalCount) {
            std::cout << "Leaves appear as parents!" << std::endl;
            break;
        }
    }

    std::unordered_map<int, int> leftLinkCount;
    for (auto left : hLefts) {
        leftLinkCount[left]++;
    }

    for (auto kvPair : leftLinkCount) {
        if ((kvPair.first == 0 && kvPair.second != 0) || (kvPair.first > 0 && kvPair.second != 1)) {
            std::cout << "Left links NOT OK" << std::endl;
            break;
        }
    }

    std::unordered_map<int, int> rightLinkCount;
    for (auto right : hRights) {
        rightLinkCount[right]++;
    }

    for (auto kvPair : rightLinkCount) {
        if ((kvPair.first == 0 && kvPair.second != 0) || (kvPair.first > 0 && kvPair.second != 1)) {
            std::cout << "Right links NOT OK" << std::endl;
            break;
        }
    }

    for (auto kvPair : leftLinkCount) {
        if (rightLinkCount.count(kvPair.first)) {
            std::cout << "Links NOT OK" << std::endl;
            break;
        }
    }

    std::set<int> visitedNodes;
    for (int i = 0; i < hNodesVisited.size(); ++i) {
        if (hNodesVisited[i] == 1) {
            visitedNodes.insert(i);
        }
    }

    //if (visitedNodes.size() != hNodesVisited.size())
    //{
    //    std::cout << "Nodes visited NOT OK" << std::endl;
    //}

    for (auto aabbMin : hAABBmins) {
        if (aabbMin != aabbMin) {
            std::cout << "AABB mins NOT OK" << std::endl;
        }
    }

    for (auto aabbMax : hAABBmaxs) {
        if (aabbMax != aabbMax) {
            std::cout << "AABB maxs NOT OK" << std::endl;
        }
    }
}

void CudaBVH::PrintTree(const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights,
    const thrust::device_vector<int>& parents, const thrust::device_vector<int> &rmlls,
    const thrust::device_vector<int> &rmlrs) {

    hipLaunchKernelGGL((PrintTree), dim3(1), dim3(1), 0, 0, lefts.size() + 1, thrust::raw_pointer_cast(&lefts[0]),
        thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]),
        thrust::raw_pointer_cast(&rmlls[0]), thrust::raw_pointer_cast(&rmlrs[0]));
}

__global__ void CudaBVH::PrintTree(const int leafNodeCount, const int *__restrict__ lefts, const int *__restrict__ rights,
    const int *__restrict__ parents, const int * __restrict__ rmlls, const int * __restrict__ rmlrs) {

    printf("Internal Nodes:\n");
    for (int i = 0; i < leafNodeCount - 1; ++i) {
        printf("[%d] Left: %d, Right: %d, Parent: %d\n", i, lefts[i], rights[i], parents[i]);
    }

    printf("Leaves:\n");
    for (int i = 0; i < leafNodeCount; ++i) {
        printf("[%d] Parent: %d\n", i + leafNodeCount - 1, parents[i + leafNodeCount - 1]);
    }

    printf("RMs:\n");
    for (int i = 0; i < leafNodeCount - 1; ++i) {
        printf("[%d] RML: %d, RMR: %d\n", i, rmlls[i], rmlrs[i]);
    }
}

//void CudaBVH::GenerateBVH(const thrust::device_vector<unsigned int>& sortedMortons,
//    thrust::device_vector<int>& lefts, thrust::device_vector<int>& rights, thrust::device_vector<int>& parents, thrust::device_vector<int> &nodesVisited)
//{
//    const int internalNodeCount = sortedMortons.size() - 1;
//
//    int numBlocks = (internalNodeCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK;
//
//    _GenerateBVH<<<numBlocks, CudaUtils::THREADS_PER_BLOCK>>>(internalNodeCount, thrust::raw_pointer_cast(&sortedMortons[0]),
//        thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]), thrust::raw_pointer_cast(&nodesVisited[0]));
//}

//__global__ void CudaBVH::_GenerateBVH(const int internalNodeCount, const unsigned int * __restrict__ sortedMortons,
//    int * __restrict__ lefts, int * __restrict__ rights, int * __restrict__ parents, int * __restrict__ nodesVisited)
//{
//    int id = CudaUtils::MyID();
//    if (id >= internalNodeCount)
//        return;
//
//    nodesVisited[id] = 0;
//
//    int d = CudaUtils::sgn(CudaUtils::ComPref(sortedMortons[id], sortedMortons[id + 1], id, id + 1, internalNodeCount) -
//        CudaUtils::ComPref(sortedMortons[id], sortedMortons[id - 1], id, id - 1, internalNodeCount));
//
//    int2 range = FindRange(sortedMortons, id, d, internalNodeCount);
//
//    int first = range.x;
//    int last = range.y;
//
//    int j = id + first * d;
//
//    int split = FindSplit(sortedMortons, id, d, j, first, internalNodeCount);
//
//    int leftID = min(id, j) == split ? internalNodeCount + split : split;
//    int rightID = max(id, j) == split + 1 ? internalNodeCount + split + 1 : split + 1;
//
//    //printf("[%d] First: %d, Last: %d, Split: %d\n", id, id, j, split);
//    printf("[%d] Left: %d, Right: %d\n", id, leftID, rightID);
//
//    lefts[id] = leftID;
//    rights[id] = rightID;
//
//    parents[leftID] = id;
//    parents[rightID] = id;
//}
//
//__device__ int CudaBVH::BinarySearch(const int start, const int del, const unsigned int *__restrict__ sortedMortons, const int id, const int d, const int internalNodeCount)
//{
//    int l = 0;
//
//    for (int t = ceilf(start / 2); t >= 1; t = ceilf(t >> 1))
//    {
//        if (CudaUtils::ComPref(sortedMortons[id], sortedMortons[id + (l + t) * d], id, id + (l + t) * d, internalNodeCount) > del)
//            l += t;
//    }
//
//    return l;
//}
//
//__device__ int2 CudaBVH::FindRange(const unsigned int *__restrict__ sortedMortons, const int id, const int d, const int internalNodeCount)
//{
//    int delMin = CudaUtils::ComPref(sortedMortons[id], sortedMortons[id - d], id, id - d, internalNodeCount);
//    int lMax = 2;
//
//    while (CudaUtils::ComPref(sortedMortons[id], sortedMortons[id + lMax * d], id, id + lMax * d, internalNodeCount) > delMin)
//        lMax <<= 1;
//
//    int l = BinarySearch(lMax, delMin, sortedMortons, id, d, internalNodeCount);
//
//    return make_int2(l, lMax);
//}
//
//__device__ int CudaBVH::FindSplit(const unsigned int *__restrict__ sortedMortons, const int id, const int d, const int j, const int l, const int internalNodeCount)
//{
//    int delNode = CudaUtils::ComPref(sortedMortons[id], sortedMortons[j], id, j, internalNodeCount);
//
//    int s = BinarySearch(l, delNode, sortedMortons, id, d, internalNodeCount);
//
//    return id + s * d + min(d, 0);
//}
6be20814ea63cd4535486fa025e28d36da8c5376.cu
#include "CudaBVH.cuh" #include "../Core/CudaUtils.cuh" #include <thrust/host_vector.h> #include <unordered_map> #include <set> void CudaBVH::GetAABBCollisions(const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights, const thrust::device_vector<float3>& AABBMins, const thrust::device_vector<float3>& AABBMaxs, const thrust::device_vector<int>& rmlls, const thrust::device_vector<int>& rmlrs, thrust::device_vector<Physics::AABBCollision> &collisions, thrust::device_vector<bool> &flags, const int chunkSize, const uint64_t timestamp) { int leafCount = lefts.size() + 1; int numBlocks = (leafCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK; _GetAABBCollisions << <numBlocks, CudaUtils::THREADS_PER_BLOCK >> > (leafCount, cu::raw(lefts), cu::raw(rights), cu::raw(AABBMins), cu::raw(AABBMaxs), cu::raw(rmlls), cu::raw(rmlrs), cu::raw(collisions), cu::raw(flags), chunkSize, timestamp); //printf("Done with AABB cols\n"); } __global__ void CudaBVH::_GetAABBCollisions(const int leafCount, const int *__restrict__ lefts, const int *__restrict__ rights, const float3 *__restrict__ aabbMins, const float3 *__restrict__ aabbMaxs, const int *__restrict__ rmlls, const int *__restrict__ rmlrs, Physics::AABBCollision * __restrict__ collisions, bool * __restrict__ flags, const int chunkSize, const uint64_t timestamp) { int id = CudaUtils::MyID(); if (id >= leafCount) return; const int internalNodeCount = leafCount - 1; const int myLeafID = internalNodeCount + id; const int myChunk = chunkSize * id; int crtCol = 0; int nodeStack[64]; nodeStack[0] = -1; int stackTop = 1; int crtNode = 0; // root do { //printf("[%d] crtNode: %d\n", id, crtNode); int left = lefts[crtNode]; int right = rights[crtNode]; bool overlapL = false; bool overlapR = false; if (rmlls[crtNode] > myLeafID) { if (CudaUtils::AABBOverlap(aabbMins[myLeafID], aabbMaxs[myLeafID], aabbMins[left], aabbMaxs[left])) overlapL = true; } if (rmlrs[crtNode] > myLeafID) { if (CudaUtils::AABBOverlap(aabbMins[myLeafID], aabbMaxs[myLeafID], aabbMins[right], aabbMaxs[right])) overlapR = true; } if (overlapL && left >= internalNodeCount) { if (crtCol < chunkSize) { collisions[myChunk + crtCol] = Physics::AABBCollision(id, left - internalNodeCount);// , timestamp); flags[myChunk + crtCol++] = true; //printf("[%d] Collision between %d and %d\n", id, myLeafID, left - internalNodeCount); } else { //printf("[%d] Out of space, myLeaf: %d, lleaf #: %d, crtCol: %d\n", id, myLeafID, left - internalNodeCount, crtCol++); } } if (overlapR && right >= internalNodeCount) { if (crtCol < chunkSize) { collisions[myChunk + crtCol] = Physics::AABBCollision(id, right - internalNodeCount);// , timestamp); flags[myChunk + crtCol++] = true; //printf("[%d] Collision between %d and %d\n", id, myLeafID, right - internalNodeCount); } else { //printf("[%d] Out of space, myLeaf: %d, rleaf #: %d, crtCol: %d\n", id, myLeafID, right - internalNodeCount, crtCol++); } } bool traverseL = (overlapL && left < internalNodeCount); bool traverseR = (overlapR && right < internalNodeCount); if (!traverseL && !traverseR) { crtNode = nodeStack[--stackTop]; } else { crtNode = traverseL ? 
left : right; if (traverseL && traverseR) nodeStack[stackTop++] = right; } } while (stackTop != 0); //AABBCollisionSizes[id] = crtCol; } void CudaBVH::ComputeTreeAABBs(const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights, const thrust::device_vector<int>& parents, thrust::device_vector<int>& nodesVisited, thrust::device_vector<float3> &AABBMins, thrust::device_vector<float3> &AABBMaxs, thrust::device_vector<int> &rmlls, thrust::device_vector<int> &rmlrs) { int leafCount = lefts.size() + 1; int numBlocks = (leafCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK; ComputeTreeAABBs <<<numBlocks, CudaUtils::THREADS_PER_BLOCK>>>(leafCount, thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]), thrust::raw_pointer_cast(&nodesVisited[0]), thrust::raw_pointer_cast(&AABBMins[0]), thrust::raw_pointer_cast(&AABBMaxs[0]), thrust::raw_pointer_cast(&rmlls[0]), thrust::raw_pointer_cast(&rmlrs[0])); } __global__ void CudaBVH::ComputeTreeAABBs(const int leafNodeCount, const int *__restrict__ lefts, const int *__restrict__ rights, const int *__restrict__ parents, int *__restrict__ nodesVisited, float3 *__restrict__ AABBMins, float3 *__restrict__ AABBMaxs, int * __restrict__ rmlls, int * __restrict__ rmlrs) { int id = CudaUtils::MyID(); if (id >= leafNodeCount) return; for (int pID = parents[leafNodeCount - 1 + id]; atomicExch(&nodesVisited[pID], 1) == 1; pID = parents[pID]) { //printf("[%d] Parent: %d\n", pID, parents[pID]); AABBMins[pID] = CudaUtils::minf3(AABBMins[lefts[pID]], AABBMins[rights[pID]]); AABBMaxs[pID] = CudaUtils::maxf3(AABBMaxs[lefts[pID]], AABBMaxs[rights[pID]]); rmlls[pID] = lefts[pID] >= leafNodeCount - 1 ? lefts[pID] : max(rmlls[lefts[pID]], rmlrs[lefts[pID]]); rmlrs[pID] = rights[pID] >= leafNodeCount - 1 ? rights[pID] : max(rmlls[rights[pID]], rmlrs[rights[pID]]); if (pID == 0) return; } } void CudaBVH::GenerateBVH2(const thrust::device_vector<uint64_t>& sortedMortons, thrust::device_vector<int>& lefts, thrust::device_vector<int>& rights, thrust::device_vector<int>& parents, thrust::device_vector<int>& nodesVisited) { const int internalNodeCount = sortedMortons.size() - 1; int numBlocks = (internalNodeCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK; _GenerateBVH2 << <numBlocks, CudaUtils::THREADS_PER_BLOCK >> >(internalNodeCount, thrust::raw_pointer_cast(&sortedMortons[0]), thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]), thrust::raw_pointer_cast(&nodesVisited[0])); } __global__ void CudaBVH::_GenerateBVH2(const int internalNodeCount, const uint64_t *__restrict__ sortedMortons, int *__restrict__ lefts, int *__restrict__ rights, int *__restrict__ parents, int *__restrict__ nodesVisited) { int id = CudaUtils::MyID(); if (id >= internalNodeCount) return; nodesVisited[id] = 0; int2 range = FindRange(sortedMortons, internalNodeCount, id); int first = range.x; int last = range.y; int split = FindSplit(sortedMortons, first, last); int leftID = split == first ? internalNodeCount + split : split; int rightID = split + 1 == last ? 
internalNodeCount + split + 1 : split + 1; //printf("[%d] First: %d, Last: %d, Split: %d\n", id, first, last, split); //printf("[%d] Left: %d, Right: %d\n", id, leftID, rightID); lefts[id] = leftID; rights[id] = rightID; parents[leftID] = id; parents[rightID] = id; } __device__ int2 CudaBVH::FindRange(const uint64_t *__restrict__ sortedMortons, const int count, const int id) { int d = CudaUtils::sgn(CudaUtils::Delta(sortedMortons, id, id + 1, count) - CudaUtils::Delta(sortedMortons, id, id - 1, count)); int delMin = CudaUtils::Delta(sortedMortons, id, id - d, count); int lMax = 2; while (CudaUtils::Delta(sortedMortons, id, id + lMax * d, count) > delMin) lMax <<= 1; //printf("\t[%d] d: %d, dLeft: %d, dRight: %d\n", id, d, CudaUtils::Delta(sortedMortons, id, id - 1, count), CudaUtils::Delta(sortedMortons, id, id + 1, count)); int l = 0; for (int t = lMax / 2; t >= 1; t /= 2) { if (CudaUtils::Delta(sortedMortons, id, id + (l + t) * d, count) > delMin) l += t; } int j = id + l * d; return id <= j ? make_int2(id, j) : make_int2(j, id); } __device__ int CudaBVH::FindSplit(const uint64_t *__restrict__ sortedMortons, const int first, const int last) { uint64_t firstCode = sortedMortons[first]; uint64_t lastCode = sortedMortons[last]; if (firstCode == lastCode) return (first + last) >> 1; int comPref = __clzll(firstCode ^ lastCode); int split = first; int step = last - first; do { step = (step + 1) >> 1; int newSplit = split + step; if (newSplit < last) { uint64_t splitCode = sortedMortons[newSplit]; int splitPref = __clzll(firstCode ^ splitCode); if (splitPref > comPref) split = newSplit; } } while (step > 1); return split; } void CudaBVH::BVHTestUnit(thrust::device_vector<uint64_t>& sortedMortons, const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights, const thrust::device_vector<int>& parents, const thrust::device_vector<int>& nodesVisited, const thrust::device_vector<float3>& AABBMins, const thrust::device_vector<float3>& AABBMaxs) { thrust::host_vector<int> hLefts = lefts; thrust::host_vector<int> hRights = rights; thrust::host_vector<int> hParents = parents; thrust::host_vector<int> hNodesVisited = nodesVisited; thrust::host_vector<float3> hAABBmins = AABBMins; thrust::host_vector<float3> hAABBmaxs = AABBMaxs; thrust::host_vector<uint64_t> hMortons = sortedMortons; const int leafCount = hMortons.size(); const int internalCount = leafCount - 1; std::unordered_map<uint64_t, int> mortonDuplicates; for (int i = 0; i < hMortons.size() - 1; ++i) { //std::cout << "mortons[" << i << "]: " << hMortons[i] << std::endl; mortonDuplicates[hMortons[i]]++; if (hMortons[i] > hMortons[i + 1]) { std::cout << "Morton codes NOT OK" << std::endl; } } for (auto kvPair : mortonDuplicates) { if (kvPair.second != 1) { // std::cout << "DUPLICATE MORTONS\n"; break; } } std::unordered_map<int, int> parentLinkCount; for (auto parent : hParents) { parentLinkCount[parent]++; } for (auto kvPair : parentLinkCount) { if ((kvPair.first == 0 && kvPair.second != 3) || (kvPair.first > 0 && kvPair.second != 2)) { std::cout << "Parent links NOT OK" << std::endl; break; } if (kvPair.first >= internalCount) { std::cout << "Leaves appear as parents!" 
<< std::endl; break; } } std::unordered_map<int, int> leftLinkCount; for (auto left : hLefts) { leftLinkCount[left]++; } for (auto kvPair : leftLinkCount) { if ((kvPair.first == 0 && kvPair.second != 0) || (kvPair.first > 0 && kvPair.second != 1)) { std::cout << "Left links NOT OK" << std::endl; break; } } std::unordered_map<int, int> rightLinkCount; for (auto right : hRights) { rightLinkCount[right]++; } for (auto kvPair : rightLinkCount) { if ((kvPair.first == 0 && kvPair.second != 0) || (kvPair.first > 0 && kvPair.second != 1)) { std::cout << "Right links NOT OK" << std::endl; break; } } for (auto kvPair : leftLinkCount) { if (rightLinkCount.count(kvPair.first)) { std::cout << "Links NOT OK" << std::endl; break; } } std::set<int> visitedNodes; for (int i = 0; i < hNodesVisited.size(); ++i) { if (hNodesVisited[i] == 1) { visitedNodes.insert(i); } } //if (visitedNodes.size() != hNodesVisited.size()) //{ // std::cout << "Nodes visited NOT OK" << std::endl; //} for (auto aabbMin : hAABBmins) { if (aabbMin != aabbMin) { std::cout << "AABB mins NOT OK" << std::endl; } } for (auto aabbMax : hAABBmaxs) { if (aabbMax != aabbMax) { std::cout << "AABB maxs NOT OK" << std::endl; } } } void CudaBVH::PrintTree(const thrust::device_vector<int>& lefts, const thrust::device_vector<int>& rights, const thrust::device_vector<int>& parents, const thrust::device_vector<int> &rmlls, const thrust::device_vector<int> &rmlrs) { PrintTree<<<1, 1>>>(lefts.size() + 1, thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]), thrust::raw_pointer_cast(&rmlls[0]), thrust::raw_pointer_cast(&rmlrs[0])); } __global__ void CudaBVH::PrintTree(const int leafNodeCount, const int *__restrict__ lefts, const int *__restrict__ rights, const int *__restrict__ parents, const int * __restrict__ rmlls, const int * __restrict__ rmlrs) { printf("Internal Nodes:\n"); for (int i = 0; i < leafNodeCount - 1; ++i) { printf("[%d] Left: %d, Right: %d, Parent: %d\n", i, lefts[i], rights[i], parents[i]); } printf("Leaves:\n"); for (int i = 0; i < leafNodeCount; ++i) { printf("[%d] Parent: %d\n", i + leafNodeCount - 1, parents[i + leafNodeCount - 1]); } printf("RMs:\n"); for (int i = 0; i < leafNodeCount - 1; ++i) { printf("[%d] RML: %d, RMR: %d\n", i, rmlls[i], rmlrs[i]); } } //void CudaBVH::GenerateBVH(const thrust::device_vector<unsigned int>& sortedMortons, // thrust::device_vector<int>& lefts, thrust::device_vector<int>& rights, thrust::device_vector<int>& parents, thrust::device_vector<int> &nodesVisited) //{ // // const int internalNodeCount = sortedMortons.size() - 1; // // int numBlocks = (internalNodeCount + CudaUtils::THREADS_PER_BLOCK - 1) / CudaUtils::THREADS_PER_BLOCK; // // _GenerateBVH<<<numBlocks, CudaUtils::THREADS_PER_BLOCK>>>(internalNodeCount, thrust::raw_pointer_cast(&sortedMortons[0]), // thrust::raw_pointer_cast(&lefts[0]), thrust::raw_pointer_cast(&rights[0]), thrust::raw_pointer_cast(&parents[0]), thrust::raw_pointer_cast(&nodesVisited[0])); //} //__global__ void CudaBVH::_GenerateBVH(const int internalNodeCount, const unsigned int * __restrict__ sortedMortons, // int * __restrict__ lefts, int * __restrict__ rights, int * __restrict__ parents, int * __restrict__ nodesVisited) //{ // int id = CudaUtils::MyID(); // if (id >= internalNodeCount) // return; // // nodesVisited[id] = 0; // // int d = CudaUtils::sgn(CudaUtils::ComPref(sortedMortons[id], sortedMortons[id + 1], id, id + 1, internalNodeCount) - // CudaUtils::ComPref(sortedMortons[id], sortedMortons[id - 1], 
id, id - 1, internalNodeCount)); // // int2 range = FindRange(sortedMortons, id, d, internalNodeCount); // // int first = range.x; // int last = range.y; // // int j = id + first * d; // // // int split = FindSplit(sortedMortons, id, d, j, first, internalNodeCount); // // // int leftID = min(id, j) == split ? internalNodeCount + split : split; // int rightID = max(id, j) == split + 1 ? internalNodeCount + split + 1 : split + 1; // // // //printf("[%d] First: %d, Last: %d, Split: %d\n", id, id, j, split); // printf("[%d] Left: %d, Right: %d\n", id, leftID, rightID); // // lefts[id] = leftID; // rights[id] = rightID; // // parents[leftID] = id; // parents[rightID] = id; //} // //__device__ int CudaBVH::BinarySearch(const int start, const int del, const unsigned int *__restrict__ sortedMortons, const int id, const int d, const int internalNodeCount) //{ // int l = 0; // // for (int t = ceilf(start / 2); t >= 1; t = ceilf(t >> 1)) // { // if (CudaUtils::ComPref(sortedMortons[id], sortedMortons[id + (l + t) * d], id, id + (l + t) * d, internalNodeCount) > del) // l += t; // } // // return l; //} // //__device__ int2 CudaBVH::FindRange(const unsigned int *__restrict__ sortedMortons, const int id, const int d, const int internalNodeCount) //{ // int delMin = CudaUtils::ComPref(sortedMortons[id], sortedMortons[id - d], id, id - d, internalNodeCount); // int lMax = 2; // // while (CudaUtils::ComPref(sortedMortons[id], sortedMortons[id + lMax * d], id, id + lMax * d, internalNodeCount) > delMin) // lMax <<= 1; // // int l = BinarySearch(lMax, delMin, sortedMortons, id, d, internalNodeCount); // // // // return make_int2(l, lMax); //} // //__device__ int CudaBVH::FindSplit(const unsigned int *__restrict__ sortedMortons, const int id, const int d, const int j, const int l, const int internalNodeCount) //{ // int delNode = CudaUtils::ComPref(sortedMortons[id], sortedMortons[j], id, j, internalNodeCount); // // int s = BinarySearch(l, delNode, sortedMortons, id, d, internalNodeCount); // // return id + s * d + min(d, 0); //}
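FindRange and FindSplit in the pair above follow the Karras (2012) parallel LBVH construction. They call a CudaUtils::Delta helper whose definition lives in CudaUtils.cuh and is not part of this row; the sketch below is only a plausible reconstruction, under the usual convention that Delta returns the length of the longest common prefix of two Morton codes, breaks ties between duplicate codes using the index bits, and returns -1 for out-of-range indices:

#include <cstdint>

// --- Hypothetical reconstruction of CudaUtils::Delta (the real definition is in
// --- CudaUtils.cuh). count is the internal-node count, so valid leaf indices
// --- run from 0 to count inclusive.
__device__ int Delta_sketch(const uint64_t *sortedMortons, int i, int j, int count) {
    if (j < 0 || j > count) return -1;             // --- Out of range: no common prefix
    if (sortedMortons[i] == sortedMortons[j])      // --- Duplicate codes: break ties on index
        return 64 + __clzll((long long)((uint64_t)i ^ (uint64_t)j));
    return __clzll((long long)(sortedMortons[i] ^ sortedMortons[j]));
}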
e2247119a4d39151438d39f72835f7c2722b254f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------------------------
// Unified Panoptic Segmentation Network
//
// Modifications Copyright (c) 2019 Uber Technologies, Inc.
// ---------------------------------------------------------------------------
// Based on:
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------

#include "gpu_nms.hpp"
#include <vector>
#include <iostream>

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of hipError_t error */ \
  do { \
    hipError_t error = condition; \
    if (error != hipSuccess) { \
      std::cout << hipGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;

__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(hipGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to hipSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(hipSetDevice(device_id));
}

void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);

  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);

  CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice));
  CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  hipLaunchKernelGGL((nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost));

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  CUDA_CHECK(hipFree(boxes_dev));
  CUDA_CHECK(hipFree(mask_dev));
}
e2247119a4d39151438d39f72835f7c2722b254f.cu
// --------------------------------------------------------------------------- // Unified Panoptic Segmentation Network // // Modifications Copyright (c) 2019 Uber Technologies, Inc. // --------------------------------------------------------------------------- // Based on: // ------------------------------------------------------------------ // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Shaoqing Ren // ------------------------------------------------------------------ #include "gpu_nms.hpp" #include <vector> #include <iostream> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(cudaSetDevice(device_id)); } void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(mask_dev)); }
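A minimal host-side sketch of driving the _nms entry point above may be useful; it assumes boxes are pre-sorted by descending score (the greedy suppression pass relies on that ordering) and uses the [x1, y1, x2, y2, score] layout implied by the stride-5 indexing in nms_kernel. The function name run_nms_example and the box values are illustrative, not part of the original sources.

#include "gpu_nms.hpp"
#include <cstdio>
#include <vector>

void run_nms_example() {
  // Two heavily overlapping boxes plus one distant box, sorted by score.
  std::vector<float> boxes = {
    10.f,  10.f,  60.f,  60.f,  0.9f,
    12.f,  12.f,  62.f,  62.f,  0.8f,   // IoU with box 0 is ~0.86 -> suppressed
    200.f, 200.f, 250.f, 250.f, 0.7f,
  };
  const int boxes_num = 3, boxes_dim = 5;
  std::vector<int> keep(boxes_num);
  int num_kept = 0;
  _nms(keep.data(), &num_kept, boxes.data(), boxes_num, boxes_dim,
       /* nms_overlap_thresh = */ 0.7f, /* device_id = */ 0);
  for (int i = 0; i < num_kept; ++i)
    std::printf("kept box %d\n", keep[i]);   // expected output: boxes 0 and 2
}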
19f44db39a2dcb57399fce11b7bce360e73ccc70.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (idata[index]) { bools[index] = 1; } else { bools[index] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (bools[index]) { odata[indices[index]] = idata[index]; } } } }
19f44db39a2dcb57399fce11b7bce360e73ccc70.cu
#include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (idata[index]) { bools[index] = 1; } else { bools[index] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (bools[index]) { odata[indices[index]] = idata[index]; } } } }
07ac62143c0606023280d2b38e086048d7fe0223.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2007-2012 Computational Electromagnetic Group (CEM), Dept. ECE, UC San Diego. All rights reserved. * Author: Shaojing Li, March 2012 */ /* * field_static_vector_gpu.cu: class definition of Class FieldStaticVectorGpu */ #include "field_static_vector_gpu.h" #include "direct.h" #include "error.h" #include "gpu.h" #include "memory.h" #include "memory_gpu.h" #include "nbodyfast.h" #include "nufft.h" namespace NBODYFAST_NS{ FieldStaticVectorGpu :: FieldStaticVectorGpu(class NBODYFAST *n_ptr) : FieldStaticVector(n_ptr), FieldGpu(n_ptr), Field(n_ptr) { d_src_amp = NULL; d_field_amp = NULL; } FieldStaticVectorGpu :: ~FieldStaticVectorGpu() { hipError_t _cuda_error; for (int _thread_id = 0; _thread_id < nbodyfast->num_devices; _thread_id++) { int _cur_dev = nbodyfast->device_name[_thread_id]; hipError_t _cuda_error; nbodyfast->gpu->memory_gpu->free_device<FP_TYPE>(_cuda_error, &d_field_amp[_thread_id], _cur_dev); nbodyfast->gpu->memory_gpu->free_device<FP_TYPE>(_cuda_error, &d_src_amp[_thread_id], _cur_dev); } nbodyfast->memory->free_host<FP_TYPE*>(&d_field_amp); nbodyfast->memory->free_host<FP_TYPE*>(&d_src_amp); } int FieldStaticVectorGpu :: array_alloc_multi_interface() { FieldStaticVector::array_alloc_multi_interface(); FieldGpu::array_alloc_multi_interface(); nbodyfast->error->last_error = amp_field_alloc_multi(); return 0; } int FieldStaticVectorGpu :: amp_field_alloc() { FieldStaticVector::amp_field_alloc(); nbodyfast->memory->alloc_host<FP_TYPE*>(&d_src_amp, nbodyfast->num_devices, "field->d_src_amp"); nbodyfast->memory->alloc_host<FP_TYPE*>(&d_field_amp, nbodyfast->num_devices, "field->d_field_amp"); for (int i = 0; i < nbodyfast->num_devices; i++) { d_src_amp[i] = NULL; d_field_amp[i] = NULL; } if (nbodyfast->multi_device == false) amp_field_alloc_single(); // similar to coordinate arrays in the Class FieldGpu, source and amplitude arrays for multi GPU calculation are allocated after preprocessing return 0; } int FieldStaticVectorGpu :: amp_field_alloc_single() { hipError_t _cuda_error; nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_src_amp[0], FIELD_DIM*nbodyfast->problem_size, "field->d_src_amp[0]", nbodyfast->gpu->dev_list[0].index); nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_field_amp[0], FIELD_DIM*nbodyfast->problem_size, "field->d_field_amp[0]", nbodyfast->gpu->dev_list[0].index); return 0; } int FieldStaticVectorGpu :: amp_field_alloc_multi() { #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); int _cur_dev = nbodyfast->device_name[_thread_id]; hipError_t _cuda_error; std::stringstream _array_name; _array_name <<"nufft->d_src_amp[" << _thread_id << "]"; { nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_src_amp[_thread_id], FIELD_DIM*nbodyfast->src_size_dev[_thread_id], _array_name.str(), _cur_dev); } _array_name.str(""); _array_name <<"nufft->d_field_amp[" << _thread_id << "]"; { nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_field_amp[_thread_id], FIELD_DIM*nbodyfast->obs_size_dev[_thread_id], _array_name.str(), _cur_dev); } } #pragma omp barrier // // nbodyfast->memory->output_allocated_list(); // nbodyfast->gpu->memory_gpu->output_allocated_list(); return 0; } // Get source amplitudes from outside, the set_src_amp is usually called at every iteration, at the beginning of execution int FieldStaticVectorGpu :: set_src_amp(double *_charge) { nbodyfast->error->last_error = nbodyfast->multi_device ? 
set_src_amp_multi(_charge) : set_src_amp_single(_charge); return 0; } // single GPU calculation goes here int FieldStaticVectorGpu :: set_src_amp_single(double *_charge) { hipDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { int *src_trans_idx = nbodyfast->nufft->get_src_trans_idx(); #pragma omp parallel for for (int i = 0; i < nbodyfast->problem_size; i++) { for(unsigned int j = 0; j < FIELD_DIM; j++){ // to copy data for NUFFT, we need src_trans_idx[] as the reordering table src_amp[i+j*nbodyfast->problem_size] = static_cast<FP_TYPE>(_charge[src_trans_idx[i]+j*nbodyfast->problem_size]); field_amp[i+j*nbodyfast->problem_size] = 0.0f; } } // copy source amplitudes to device and set field amplitudes to 0 nbodyfast->gpu->memory_gpu->memcpy_host_to_device<FP_TYPE>(d_src_amp[0], src_amp, FIELD_DIM*nbodyfast->problem_size, nbodyfast->device_name[0]); nbodyfast->gpu->memory_gpu->memset_device<FP_TYPE>(d_field_amp[0], 0, FIELD_DIM*nbodyfast->problem_size, nbodyfast->device_name[0]); hipDeviceSynchronize(); return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { for (int i = 0; i < FIELD_DIM*nbodyfast->problem_size; i++) { src_amp[i] = FP_TYPE(_charge[i]); field_amp[i] = 0.0f; } nbodyfast->gpu->memory_gpu->memcpy_host_to_device<FP_TYPE>(d_src_amp[0], src_amp, FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); nbodyfast->gpu->memory_gpu->memset_device<FP_TYPE>(d_field_amp[0], 0, FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); hipDeviceSynchronize(); return 0; } std::cout << "unknown algorithm..., exiting..." << std::endl; exit(0); return -1; } // multi GPU calculation goes here int FieldStaticVectorGpu :: set_src_amp_multi(double *_charge) { hipDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { int **src_trans_idx_dev = nbodyfast->nufft->get_src_trans_idx_dev(); int *src_trans_idx = nbodyfast->nufft->get_src_trans_idx(); #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); int _cur_dev = nbodyfast->device_name[_thread_id]; hipError_t _cuda_error; for (int i = 0; i < nbodyfast->src_size_dev[_thread_id]; i++) { for( unsigned int j = 0; j < FIELD_DIM; j++){ src_amp_dev[_thread_id][i+j*nbodyfast->src_size_dev[_thread_id]] = _charge[src_trans_idx[src_trans_idx_dev[_thread_id][i]]+j*nbodyfast->problem_size]; // //src_amp_dev[_thread_id][i] = src_amp[src_trans_idx_dev[_thread_id][i]]; // //src_amp_dev[_thread_id][i] = _charge[src_trans_idx_dev[_thread_id][i]]; } } for( unsigned int i = 0; i < FIELD_DIM*nbodyfast->obs_size_dev[_thread_id]; i++) field_amp_dev[_thread_id][i] = 0.0f; // copy source amplitudes to device and set field amplitudes to 0 nbodyfast->gpu->memory_gpu->memcpy_host_to_device<FP_TYPE>(d_src_amp[_thread_id], src_amp_dev[_thread_id], FIELD_DIM*nbodyfast->src_size_dev[_thread_id], _cur_dev); nbodyfast->gpu->memory_gpu->memset_device<FP_TYPE>(d_field_amp[_thread_id], 0, FIELD_DIM*nbodyfast->obs_size_dev[_thread_id], _cur_dev); } hipDeviceSynchronize(); #pragma omp barrier return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { std::cout << "direct method on multi GPU is not ready yet" << std::endl; std::cout << "stopped at \"int FieldStaticVectorGpu :: set_src_amp_single(double *_charge)\" " << std::endl; exit(0); return 0; } std::cout << "unknown algorithm..." << std::endl; exit(0); return -1; } // Transfer field amplitudes to outside, the set_fld_amp is usually called at every iteration, at the end of execution. 
They are symmetrical to the set_src series of subroutines int FieldStaticVectorGpu :: set_fld_amp(double *_field) { nbodyfast->error->last_error = nbodyfast->multi_device ? set_fld_amp_multi(_field) : set_fld_amp_single(_field); return 0; } // single GPU calculation goes here int FieldStaticVectorGpu :: set_fld_amp_single(double *_field) { hipError_t _err = hipGetLastError(); if( _err != hipSuccess ) { fprintf(stderr, "CUDA Runtime API Error reported : %s at %s:%d\n", hipGetErrorString(_err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { nbodyfast->gpu->memory_gpu->memcpy_device_to_host<FP_TYPE>(field_amp, d_field_amp[0], FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); int *obs_trans_idx = nbodyfast->nufft->get_obs_trans_idx(); #pragma omp parallel for for (int i = 0; i < nbodyfast->problem_size; i++) { for(unsigned int j = 0; j < FIELD_DIM; j++) _field[obs_trans_idx[i]+j*nbodyfast->problem_size] = static_cast<double>(field_amp[i+j*nbodyfast->problem_size]); } hipDeviceSynchronize(); return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { nbodyfast->gpu->memory_gpu->memcpy_device_to_host<FP_TYPE>(field_amp, d_field_amp[0], FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); for (int i = 0; i < FIELD_DIM*nbodyfast->problem_size; i++) { _field[i] = static_cast<double>(field_amp[i]); src_amp[i] = 0.0f; field_amp[i] = 0.0f; } //hipDeviceSynchronize(); return 0; } std::cout << "unknown algorithm..." << std::endl; exit(0); return -1; } // multi GPU calculation goes here int FieldStaticVectorGpu :: set_fld_amp_multi(double *_field) { hipDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { int **obs_trans_idx_dev = nbodyfast->nufft->get_obs_trans_idx_dev(); int *obs_trans_idx = nbodyfast->nufft->get_obs_trans_idx(); #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); int _cur_dev = nbodyfast->device_name[_thread_id]; hipError_t _cuda_error; nbodyfast->gpu->memory_gpu->memcpy_device_to_host<FP_TYPE>(field_amp_dev[_thread_id], d_field_amp[_thread_id], FIELD_DIM*nbodyfast->obs_size_dev[_thread_id], _cur_dev); for (int i = 0; i < nbodyfast->obs_size_act_dev[_thread_id]; i++) { //field_amp[obs_trans_idx_dev[_thread_id][i]] = field_amp_dev[_thread_id][i]; for( unsigned int j = 0; j < FIELD_DIM; j++){ _field[obs_trans_idx[obs_trans_idx_dev[_thread_id][i]]+j*nbodyfast->problem_size] = static_cast<double>(field_amp_dev[_thread_id][i+j*nbodyfast->obs_size_dev[_thread_id]]); } //NOT SURE IF SHOULD BE obs_size_dev or obs_size_act_dev!!!!!!!!!!!!!!!!!!!!!!!!!!! //_field[obs_trans_idx_dev[_thread_id][i]] = field_amp_dev[_thread_id][i]; } //hipDeviceSynchronize(); } //#pragma omp barrier //#pragma omp parallel for // for (int i = 0; i < problem_size; i++) // { // _field[obs_trans_idx[i]] = field_amp[i]; // } #pragma omp barrier hipDeviceSynchronize(); return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { std::cout << "direct method on multi GPU is not ready yet" << std::endl; std::cout << "stopped at \"int FieldStaticVectorGpu :: set_fld_amp_multi(double *_field)\" " << std::endl; exit(0); return 0; } std::cout << "unknown algorithm..." << std::endl; exit(0); return -1; } }
07ac62143c0606023280d2b38e086048d7fe0223.cu
/* * Copyright 2007-2012 Computational Electromagnetic Group (CEM), Dept. ECE, UC San Diego. All rights reserved. * Author: Shaojing Li, March 2012 */ /* * field_static_vector_gpu.cu: class definition of Class FieldStaticVectorGpu */ #include "field_static_vector_gpu.h" #include "direct.h" #include "error.h" #include "gpu.h" #include "memory.h" #include "memory_gpu.h" #include "nbodyfast.h" #include "nufft.h" namespace NBODYFAST_NS{ FieldStaticVectorGpu :: FieldStaticVectorGpu(class NBODYFAST *n_ptr) : FieldStaticVector(n_ptr), FieldGpu(n_ptr), Field(n_ptr) { d_src_amp = NULL; d_field_amp = NULL; } FieldStaticVectorGpu :: ~FieldStaticVectorGpu() { cudaError_t _cuda_error; for (int _thread_id = 0; _thread_id < nbodyfast->num_devices; _thread_id++) { int _cur_dev = nbodyfast->device_name[_thread_id]; cudaError_t _cuda_error; nbodyfast->gpu->memory_gpu->free_device<FP_TYPE>(_cuda_error, &d_field_amp[_thread_id], _cur_dev); nbodyfast->gpu->memory_gpu->free_device<FP_TYPE>(_cuda_error, &d_src_amp[_thread_id], _cur_dev); } nbodyfast->memory->free_host<FP_TYPE*>(&d_field_amp); nbodyfast->memory->free_host<FP_TYPE*>(&d_src_amp); } int FieldStaticVectorGpu :: array_alloc_multi_interface() { FieldStaticVector::array_alloc_multi_interface(); FieldGpu::array_alloc_multi_interface(); nbodyfast->error->last_error = amp_field_alloc_multi(); return 0; } int FieldStaticVectorGpu :: amp_field_alloc() { FieldStaticVector::amp_field_alloc(); nbodyfast->memory->alloc_host<FP_TYPE*>(&d_src_amp, nbodyfast->num_devices, "field->d_src_amp"); nbodyfast->memory->alloc_host<FP_TYPE*>(&d_field_amp, nbodyfast->num_devices, "field->d_field_amp"); for (int i = 0; i < nbodyfast->num_devices; i++) { d_src_amp[i] = NULL; d_field_amp[i] = NULL; } if (nbodyfast->multi_device == false) amp_field_alloc_single(); // similar to coordinate arrays in the Class FieldGpu, source and amplitude arrays for multi GPU calculation are allocated after preprocessing return 0; } int FieldStaticVectorGpu :: amp_field_alloc_single() { cudaError_t _cuda_error; nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_src_amp[0], FIELD_DIM*nbodyfast->problem_size, "field->d_src_amp[0]", nbodyfast->gpu->dev_list[0].index); nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_field_amp[0], FIELD_DIM*nbodyfast->problem_size, "field->d_field_amp[0]", nbodyfast->gpu->dev_list[0].index); return 0; } int FieldStaticVectorGpu :: amp_field_alloc_multi() { #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); int _cur_dev = nbodyfast->device_name[_thread_id]; cudaError_t _cuda_error; std::stringstream _array_name; _array_name <<"nufft->d_src_amp[" << _thread_id << "]"; { nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_src_amp[_thread_id], FIELD_DIM*nbodyfast->src_size_dev[_thread_id], _array_name.str(), _cur_dev); } _array_name.str(""); _array_name <<"nufft->d_field_amp[" << _thread_id << "]"; { nbodyfast->gpu->memory_gpu->alloc_device<FP_TYPE>(_cuda_error, &d_field_amp[_thread_id], FIELD_DIM*nbodyfast->obs_size_dev[_thread_id], _array_name.str(), _cur_dev); } } #pragma omp barrier // // nbodyfast->memory->output_allocated_list(); // nbodyfast->gpu->memory_gpu->output_allocated_list(); return 0; } // Get source amplitudes from outside, the set_src_amp is usually called at every iteration, at the beginning of execution int FieldStaticVectorGpu :: set_src_amp(double *_charge) { nbodyfast->error->last_error = nbodyfast->multi_device ? 
set_src_amp_multi(_charge) : set_src_amp_single(_charge); return 0; } // single GPU calculation goes here int FieldStaticVectorGpu :: set_src_amp_single(double *_charge) { cudaDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { int *src_trans_idx = nbodyfast->nufft->get_src_trans_idx(); #pragma omp parallel for for (int i = 0; i < nbodyfast->problem_size; i++) { for(unsigned int j = 0; j < FIELD_DIM; j++){ // to copy data for NUFFT, we need src_trans_idx[] as the reordering table src_amp[i+j*nbodyfast->problem_size] = static_cast<FP_TYPE>(_charge[src_trans_idx[i]+j*nbodyfast->problem_size]); field_amp[i+j*nbodyfast->problem_size] = 0.0f; } } // copy source amplitudes to device and set field amplitudes to 0 nbodyfast->gpu->memory_gpu->memcpy_host_to_device<FP_TYPE>(d_src_amp[0], src_amp, FIELD_DIM*nbodyfast->problem_size, nbodyfast->device_name[0]); nbodyfast->gpu->memory_gpu->memset_device<FP_TYPE>(d_field_amp[0], 0, FIELD_DIM*nbodyfast->problem_size, nbodyfast->device_name[0]); cudaDeviceSynchronize(); return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { for (int i = 0; i < FIELD_DIM*nbodyfast->problem_size; i++) { src_amp[i] = FP_TYPE(_charge[i]); field_amp[i] = 0.0f; } nbodyfast->gpu->memory_gpu->memcpy_host_to_device<FP_TYPE>(d_src_amp[0], src_amp, FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); nbodyfast->gpu->memory_gpu->memset_device<FP_TYPE>(d_field_amp[0], 0, FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); cudaDeviceSynchronize(); return 0; } std::cout << "unknown algorithm..., exiting..." << std::endl; exit(0); return -1; } // multi GPU calculation goes here int FieldStaticVectorGpu :: set_src_amp_multi(double *_charge) { cudaDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { int **src_trans_idx_dev = nbodyfast->nufft->get_src_trans_idx_dev(); int *src_trans_idx = nbodyfast->nufft->get_src_trans_idx(); #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); int _cur_dev = nbodyfast->device_name[_thread_id]; cudaError_t _cuda_error; for (int i = 0; i < nbodyfast->src_size_dev[_thread_id]; i++) { for( unsigned int j = 0; j < FIELD_DIM; j++){ src_amp_dev[_thread_id][i+j*nbodyfast->src_size_dev[_thread_id]] = _charge[src_trans_idx[src_trans_idx_dev[_thread_id][i]]+j*nbodyfast->problem_size]; // //src_amp_dev[_thread_id][i] = src_amp[src_trans_idx_dev[_thread_id][i]]; // //src_amp_dev[_thread_id][i] = _charge[src_trans_idx_dev[_thread_id][i]]; } } for( unsigned int i = 0; i < FIELD_DIM*nbodyfast->obs_size_dev[_thread_id]; i++) field_amp_dev[_thread_id][i] = 0.0f; // copy source amplitudes to device and set field amplitudes to 0 nbodyfast->gpu->memory_gpu->memcpy_host_to_device<FP_TYPE>(d_src_amp[_thread_id], src_amp_dev[_thread_id], FIELD_DIM*nbodyfast->src_size_dev[_thread_id], _cur_dev); nbodyfast->gpu->memory_gpu->memset_device<FP_TYPE>(d_field_amp[_thread_id], 0, FIELD_DIM*nbodyfast->obs_size_dev[_thread_id], _cur_dev); } cudaDeviceSynchronize(); #pragma omp barrier return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { std::cout << "direct method on multi GPU is not ready yet" << std::endl; std::cout << "stopped at \"int FieldStaticVectorGpu :: set_src_amp_single(double *_charge)\" " << std::endl; exit(0); return 0; } std::cout << "unknown algorithm..." << std::endl; exit(0); return -1; } // Transfer field amplitudes to outside, the set_fld_amp is usually called at every iteration, at the end of execution. 
They are symmetrical to the set_src series of subroutines int FieldStaticVectorGpu :: set_fld_amp(double *_field) { nbodyfast->error->last_error = nbodyfast->multi_device ? set_fld_amp_multi(_field) : set_fld_amp_single(_field); return 0; } // single GPU calculation goes here int FieldStaticVectorGpu :: set_fld_amp_single(double *_field) { cudaError_t _err = cudaGetLastError(); if( _err != cudaSuccess ) { fprintf(stderr, "CUDA Runtime API Error reported : %s at %s:%d\n", cudaGetErrorString(_err), __FILE__, __LINE__); exit(EXIT_FAILURE); } cudaDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { nbodyfast->gpu->memory_gpu->memcpy_device_to_host<FP_TYPE>(field_amp, d_field_amp[0], FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); int *obs_trans_idx = nbodyfast->nufft->get_obs_trans_idx(); #pragma omp parallel for for (int i = 0; i < nbodyfast->problem_size; i++) { for(unsigned int j = 0; j < FIELD_DIM; j++) _field[obs_trans_idx[i]+j*nbodyfast->problem_size] = static_cast<double>(field_amp[i+j*nbodyfast->problem_size]); } cudaDeviceSynchronize(); return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { nbodyfast->gpu->memory_gpu->memcpy_device_to_host<FP_TYPE>(field_amp, d_field_amp[0], FIELD_DIM*nbodyfast->problem_size, nbodyfast->gpu->dev_list[0].index); for (int i = 0; i < FIELD_DIM*nbodyfast->problem_size; i++) { _field[i] = static_cast<double>(field_amp[i]); src_amp[i] = 0.0f; field_amp[i] = 0.0f; } //cudaDeviceSynchronize(); return 0; } std::cout << "unknown algorithm..." << std::endl; exit(0); return -1; } // multi GPU calculation goes here int FieldStaticVectorGpu :: set_fld_amp_multi(double *_field) { cudaDeviceSynchronize(); if (nbodyfast->algo_name == "nufft") { int **obs_trans_idx_dev = nbodyfast->nufft->get_obs_trans_idx_dev(); int *obs_trans_idx = nbodyfast->nufft->get_obs_trans_idx(); #pragma omp barrier #pragma omp parallel { int _thread_id = omp_get_thread_num(); int _cur_dev = nbodyfast->device_name[_thread_id]; cudaError_t _cuda_error; nbodyfast->gpu->memory_gpu->memcpy_device_to_host<FP_TYPE>(field_amp_dev[_thread_id], d_field_amp[_thread_id], FIELD_DIM*nbodyfast->obs_size_dev[_thread_id], _cur_dev); for (int i = 0; i < nbodyfast->obs_size_act_dev[_thread_id]; i++) { //field_amp[obs_trans_idx_dev[_thread_id][i]] = field_amp_dev[_thread_id][i]; for( unsigned int j = 0; j < FIELD_DIM; j++){ _field[obs_trans_idx[obs_trans_idx_dev[_thread_id][i]]+j*nbodyfast->problem_size] = static_cast<double>(field_amp_dev[_thread_id][i+j*nbodyfast->obs_size_dev[_thread_id]]); } //NOT SURE IF SHOULD BE obs_size_dev or obs_size_act_dev!!!!!!!!!!!!!!!!!!!!!!!!!!! //_field[obs_trans_idx_dev[_thread_id][i]] = field_amp_dev[_thread_id][i]; } //cudaDeviceSynchronize(); } //#pragma omp barrier //#pragma omp parallel for // for (int i = 0; i < problem_size; i++) // { // _field[obs_trans_idx[i]] = field_amp[i]; // } #pragma omp barrier cudaDeviceSynchronize(); return 0; } // default source copying for direct methods if (nbodyfast->algo_name == "direct") { std::cout << "direct method on multi GPU is not ready yet" << std::endl; std::cout << "stopped at \"int FieldStaticVectorGpu :: set_fld_amp_multi(double *_field)\" " << std::endl; exit(0); return 0; } std::cout << "unknown algorithm..." << std::endl; exit(0); return -1; } }
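The upload path above gathers host amplitudes through src_trans_idx before the copy to the device, and the download path scatters device results back through obs_trans_idx. A small illustrative sketch of that permutation pattern (function names hypothetical): scatter undoes gather whenever trans_idx is a permutation, which is why a round trip through the device returns amplitudes in their original order.

#include <vector>

// Gather on upload: staging slot i receives host element trans_idx[i].
template <typename T>
void gather(std::vector<T> &dst, const std::vector<T> &src, const std::vector<int> &trans_idx) {
  for (std::size_t i = 0; i < trans_idx.size(); ++i) dst[i] = src[trans_idx[i]];
}

// Scatter on download: host element trans_idx[i] receives staging slot i,
// so scatter(gather(x)) == x for any permutation trans_idx.
template <typename T>
void scatter(std::vector<T> &dst, const std::vector<T> &src, const std::vector<int> &trans_idx) {
  for (std::size_t i = 0; i < trans_idx.size(); ++i) dst[trans_idx[i]] = src[i];
}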
fbc170ea95b745f763b03925ee718b0fbf709396.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <time.h> #include <random> using namespace std; // the main way of summing the data is taken from the nvidia article http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf // not the most optimal algorithm given in the article, but not the worst either __global__ void piCalcKernel(int *d_odata, double* a, double* b, int size) { __shared__ int sdata[1024]; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = ((a[i] * a[i] + b[i] * b[i]) < 1 ? 1 : 0); __syncthreads(); for (int j = blockDim.x/2; j > 0; j>>=1) { if (tid < j) { sdata[tid] += sdata[tid + j]; } __syncthreads(); } if (tid == 0) d_odata[blockIdx.x] = sdata[0]; } // Helper function for using CUDA to add vectors in parallel. hipError_t piCalcWithCuda(int* odata, double* a, double* b, int size) { double* dev_a = 0; double* dev_b = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } int* dev_odata = 0; cudaStatus = hipMalloc((void**)&dev_odata, (size / 1024) * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } dim3 blockSize = dim3(1024, 1, 1); dim3 gridSize = dim3(size/1024, 1, 1); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( piCalcKernel) , dim3(gridSize), dim3(blockSize) , 0, 0, dev_odata, dev_a, dev_b, size); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "piCalcKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching piCalcKernel!\n", cudaStatus); } // Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(odata, dev_odata, (size / 1024) * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } hipFree(dev_a); hipFree(dev_b); hipFree(dev_odata); return cudaStatus; } double simplePiCalc(double* a, double* b, int size) { int sum = 0; for (int i = 0; i < size; i++) { if (a[i] * a[i] + b[i] * b[i] < 1) { sum++; } } return (double) 4*sum / size; } int main() { int N; cout << "Enter the number of numbers: " << endl; cin >> N; // allocate memory and generate the numbers double* a = new double[N]; double* b = new double[N]; int* cudaOut = new int[(N / 1024)]; std::default_random_engine generator; std::uniform_real_distribution<double> distribution(-1.0, 1.0); for (int i = 0; i < N; i++) { a[i] = distribution(generator); b[i] = distribution(generator); } // cuda implementation double result = 0; clock_t start = clock(); hipError_t cudaStatus = piCalcWithCuda(cudaOut, a, b, N); if (cudaStatus != hipSuccess) { fprintf(stderr, "piCalcWithCuda failed!"); } for (int i = 0; i < (N / 1024); i++) { result += cudaOut[i]; } result = 4 * (double) result / (N - N%1024); clock_t end = clock(); cout << "Cuda time: " << (double)(end - start) / CLOCKS_PER_SEC << endl; cout << "Cuda result: " << result << endl; // cpu implementation result = 0; start = clock(); result = simplePiCalc(a, b, N); end = clock(); cout << "CPU time: " << (double)(end - start) / CLOCKS_PER_SEC << endl; cout << "CPU result: " << result << endl; delete[] cudaOut; delete[] a; delete[] b; // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; }
fbc170ea95b745f763b03925ee718b0fbf709396.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <time.h> #include <random> using namespace std; // основной способ сложения данных из статьи nvidia http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf // не самый оптимальный приведенный в статье алгоритм, но и не самый худший __global__ void piCalcKernel(int *d_odata, double* a, double* b, int size) { __shared__ int sdata[1024]; int tid = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = ((a[i] * a[i] + b[i] * b[i]) < 1 ? 1 : 0); __syncthreads(); for (int j = blockDim.x/2; j > 0; j>>=1) { if (tid < j) { sdata[tid] += sdata[tid + j]; } __syncthreads(); } if (tid == 0) d_odata[blockIdx.x] = sdata[0]; } // Helper function for using CUDA to add vectors in parallel. cudaError_t piCalcWithCuda(int* odata, double* a, double* b, int size) { double* dev_a = 0; double* dev_b = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } int* dev_odata = 0; cudaStatus = cudaMalloc((void**)&dev_odata, (size / 1024) * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } dim3 blockSize = dim3(1024, 1, 1); dim3 gridSize = dim3(size/1024, 1, 1); // Launch a kernel on the GPU with one thread for each element. piCalcKernel <<<gridSize, blockSize >>> (dev_odata, dev_a, dev_b, size); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "piCalcKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching piCalcKernel!\n", cudaStatus); } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(odata, dev_odata, (size / 1024) * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_odata); return cudaStatus; } double simplePiCalc(double* a, double* b, int size) { int sum = 0; for (int i = 0; i < size; i++) { if (a[i] * a[i] + b[i] * b[i] < 1) { sum++; } } return (double) 4*sum / size; } int main() { int N; cout << "Enter the number of numbers: " << endl; cin >> N; // allocate memory and generate the numbers double* a = new double[N]; double* b = new double[N]; int* cudaOut = new int[(N / 1024)]; std::default_random_engine generator; std::uniform_real_distribution<double> distribution(-1.0, 1.0); for (int i = 0; i < N; i++) { a[i] = distribution(generator); b[i] = distribution(generator); } // cuda implementation double result = 0; clock_t start = clock(); cudaError_t cudaStatus = piCalcWithCuda(cudaOut, a, b, N); if (cudaStatus != cudaSuccess) { fprintf(stderr, "piCalcWithCuda failed!"); } for (int i = 0; i < (N / 1024); i++) { result += cudaOut[i]; } result = 4 * (double) result / (N - N%1024); clock_t end = clock(); cout << "Cuda time: " << (double)(end - start) / CLOCKS_PER_SEC << endl; cout << "Cuda result: " << result << endl; // cpu implementation result = 0; start = clock(); result = simplePiCalc(a, b, N); end = clock(); cout << "CPU time: " << (double)(end - start) / CLOCKS_PER_SEC << endl; cout << "CPU result: " << result << endl; delete[] cudaOut; delete[] a; delete[] b; // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }
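A hedged sketch of the next step on the reduction slides referenced above: each thread performs its first addition while loading from global memory, so only half as many blocks are needed (gridSize would become size/2048 for the 1024-thread blocks used here). piCalcKernel2 is an illustrative name and, like the original kernel, it assumes the grid exactly covers the input (size a multiple of 2048).

__global__ void piCalcKernel2(int *d_odata, const double *a, const double *b, int size) {
  __shared__ int sdata[1024];
  int tid = threadIdx.x;
  // Each block consumes 2 * blockDim.x points; the first add happens here.
  int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
  int hitLo = (a[i] * a[i] + b[i] * b[i]) < 1 ? 1 : 0;
  int hitHi = (a[i + blockDim.x] * a[i + blockDim.x] +
               b[i + blockDim.x] * b[i + blockDim.x]) < 1 ? 1 : 0;
  sdata[tid] = hitLo + hitHi;
  __syncthreads();
  // Same tree reduction as piCalcKernel.
  for (int j = blockDim.x / 2; j > 0; j >>= 1) {
    if (tid < j) sdata[tid] += sdata[tid + j];
    __syncthreads();
  }
  if (tid == 0) d_odata[blockIdx.x] = sdata[0];
}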
9bdba506d4f9a318a916b479c7ec249b8d895a61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "wb.h" #define SQR(x) ((x)*(x)) #define SQR5(x) ((x)*(x)*(x)*(x)*(x)) #include "cuda.def.cuh" #include "vector3.h" #include <fstream> #include <cmath> class Ray { public: Vector3 origin; Vector3 direction; GCPU_F Ray( const Vector3 & origin, const Vector3 & direction ) : origin( origin ), direction( direction ) {} GCPU_F Ray( const Vector3 & direction ) : Ray( 0, direction ) {} GCPU_F Ray( ) : Ray( Vector3(1, 0, 0) ) {} }; class Material { public: Vector3 albedo; float roughness = 0; float metalness = 0; }; class Sphere { public: float size = 1; Vector3 center; Material material; GCPU_F Sphere() : size(1), center( 0.0 ){} GCPU_F Sphere( Sphere & sphere ) : size( sphere.size ), center( sphere.center ), material( sphere.material ) {} GCPU_F Vector3 normalAt( const Vector3 & hit ) const { return (hit - center).normalize(); } GCPU_F bool intersects( const Vector3 & origin, const Vector3 & direction, Vector3 & hit, float & distance ) const { auto m = origin - center; float b = Vector3::dotProduct( m, direction ); float c = m.length2() - SQR( size ); if( c > 0.0f && b > 0.0f ) return false; float discr = SQR( b ) - c; if( discr < 0.0f ) return false; auto t = -b - sqrt( discr ); if( t < 0.0f ) t = -b + sqrt( discr ); hit = origin + direction * t; distance = t; return true; } friend std::ostream & operator << ( std::ostream & os, const Sphere & sphere ) { os << "Sphere: radius: " << sphere.size << " center: " << sphere.center << " color: " << sphere.material.albedo; return os; } }; class Light { public: Vector3 origin; Vector3 color = 1; float intensity = 1; Light() {} }; class Collision { public: Sphere * sphere = nullptr; Vector3 normal; Vector3 hit; float distance = 2 * 1e38; }; GCPU_F bool getFirstCollision( const Ray & ray, Sphere * spheres, const int sphereCount, Collision & collision ) { collision.distance = 2e38; collision.sphere = nullptr; auto collided = false; for( unsigned int i = 0; i < sphereCount; i++ ) { Vector3 hit; float distance; if( !spheres[i].intersects( ray.origin, ray.direction, hit, distance ) ) continue; if( collision.distance < distance ) continue; collision.distance = distance; collision.hit = hit; collision.normal = spheres[i].normalAt( hit ); collision.sphere = &spheres[i]; collided = true; } return collided; } GCPU_F float GGXDistribution( float alpha2, float NdotH ) { return alpha2 / (M_PI * SQR( NdotH ) * SQR((alpha2 - 1) + 1) ); } GCPU_F float GGXGeometry( float alpha2, float NdotL ) { return 2 * NdotL / (NdotL + sqrt(alpha2 + (1-alpha2) * SQR(NdotL) )); } GCPU_F Vector3 Fresnel( const Vector3 & f0, const float NdotH ) { auto unary = Vector3(1); return f0 + (unary - f0) * (SQR5(unary - NdotH)); } GCPU_F float dodgeZero( float value ) { if( value < 0 ) value = 0; if( value < 0.001 ) value = 0.001; return value; } __global__ void raytrace( Vector3 * image, Sphere * spheres, Light * lights, int width, int height, int sphereCount, int lightCount ) { int idx = threadIdx.x + threadIdx.y * blockDim.y + blockIdx.x * blockDim.x * blockDim.y; if( width * height <= idx ) return; //printf("%d\n", threadIdx.x + threadIdx.y * blockDim.x); int i = idx % width; int j = height - idx / height; //printf("%d %d %d\n", i, j, idx ); //printf("-- %d %d %d\n", spheres[0].center.r, spheres[0].center.g, spheres[0].center.b ); Vector3 pos = Vector3( i, j, 0 ); Ray ray; ray.origin = Vector3( i, j, -100 ); ray.direction = Vector3( 0, 0, 1 ); image[ idx ] = Vector3( 1 - (j / (float)height) ); 
Collision collision; if( !getFirstCollision( ray, spheres, sphereCount, collision ) ) return; //image[ idx ] = collision.sphere->color; auto material = collision.sphere->material; auto albedo = material.albedo; auto rough = material.roughness * 0.97 + 0.03; auto metal = material.metalness; auto f0 = albedo * (0.04 * (1-metal) + metal); auto alpha = SQR( rough ); auto alpha2 = SQR( alpha ); image[ idx ] = 0; for( unsigned int i = 0; i < lightCount; i++ ) { auto & light = lights[i]; auto color = light.color * light.intensity; auto L = (light.origin - collision.hit).normalize(); auto N = collision.normal; auto V = -ray.direction.normalized(); auto H = (L + V).normalize(); auto NdotL = Vector3::dotProduct( N, L ); if( NdotL < 0 ) continue; auto NdotV = dodgeZero( Vector3::dotProduct( N, V ) ); auto NdotH = dodgeZero( Vector3::dotProduct( N, H ) ); auto D = GGXDistribution( alpha2, NdotL ); auto G = GGXGeometry( alpha2, NdotL ) * GGXGeometry( alpha2, NdotV ); auto F = Fresnel( f0, NdotH ); auto denom = 4 * NdotL * NdotV; auto spec = F * G * D / denom; auto nonspec = (Vector3(1) - F); color *= albedo * (spec + nonspec) / M_PI * NdotL; image[idx] += color; } } void writeP3( std::string fileName, Vector3 * data, int width, int height ) { std::ofstream file; file.open( fileName ); file << "P3\n" << width << " " << height << "\n" << 255 << std::endl; for( auto idx = 0; idx < width * height; idx++ ) { auto d = data[ idx ]; d.clamp( 0, 1 ); std::cout << d * 255 << std::endl; file << (int)(d.r * 255) << " " << (int)(d.g * 255) << " " << (int)(d.b * 255) << " "; } file << std::flush; } class Scene { public: Light * lights; int lightCount = 0; Sphere * spheres; int sphereCount = 0; GCPU_F Scene() {} GCPU_F ~Scene() { if( sphereCount != 0 ) delete [] spheres; if( lightCount != 0 ) delete [] lights; } Scene & addSphere( const Sphere & sphere ) { Sphere * spheres = new Sphere[ sphereCount + 1 ]; for( unsigned int i = 0; i < sphereCount; i++ ) { spheres[i] = this->spheres[i]; } spheres[sphereCount] = sphere; if( sphereCount ) { delete [] this->spheres; } this->spheres = spheres; sphereCount++; return *this; } Scene & addLight( const Light & light ) { Light * lights = new Light[ lightCount + 1 ]; for( unsigned int i = 0; i < lightCount; i++ ) { lights[i] = this->lights[i]; } lights[lightCount] = light; if( lightCount ) { delete [] this->lights; } this->lights = lights; lightCount++; return *this; } }; void populateScene( Scene & scene ) { auto steps = 5; auto stepW = 300 / (steps + 2); auto stepH = 300 / (steps + 2); for( unsigned int i = 0; i <= steps; i++ ) { for( unsigned int j = 0; j <= steps; j++ ) { Sphere sphere; sphere.center = Vector3( stepW * (i + 1), stepH * (j + 1), 0 ); sphere.size = min( stepW, stepH ) / 2.5f; sphere.material.albedo = Vector3(1, 0, 0.5); sphere.material.roughness = (float)i/(float)steps; sphere.material.metalness = (float)j/(float)steps; scene.addSphere( sphere ); } } // Sphere sphere1; // sphere1.center = Vector3( 140, 130, 0 ); // sphere1.size = 100; // sphere1.material.albedo = Vector3( 1, 0, 0 ); // Sphere sphere2; // sphere2.center = Vector3( 210, 230, 0 ); // sphere2.size = 20; // sphere2.material.albedo = Vector3( 0, 0, 1 ); // scene.addSphere( sphere1 ); // scene.addSphere( sphere2 ); Light light1; light1.origin = Vector3( 0, 10000, -10000 ); scene.addLight( light1 ); } int main(int argc, char *argv[]) { hipDeviceReset(); Vector3 * imgData; Vector3 * d_imgData; Sphere * d_spheres; Light * d_lights; int width = 300; int height = 300; auto pixelCount = width * height; 
imgData = new Vector3[ pixelCount ]; Scene scene; populateScene( scene ); hipMalloc( (void **) &d_imgData, pixelCount * sizeof( Vector3 ) ); hipMalloc( (void **) &d_spheres, scene.sphereCount * sizeof( Sphere ) ); hipMalloc( (void **) &d_lights, scene.lightCount * sizeof( Light ) ); hipMemcpy( d_spheres, scene.spheres, scene.sphereCount * sizeof( Sphere ), hipMemcpyHostToDevice ); hipMemcpy( d_lights, scene.lights, scene.lightCount * sizeof( Light ), hipMemcpyHostToDevice ); dim3 threaddim = dim3( 32, 32, 1 ); auto totalBlockCount = ceil( pixelCount / (float)(threaddim.x * threaddim.y) ); dim3 blockdim = dim3( totalBlockCount, 1, 1 ); hipLaunchKernelGGL(( raytrace), dim3(blockdim), dim3(threaddim) , 0, 0, d_imgData, d_spheres, d_lights, width, height, scene.sphereCount, scene.lightCount ); CHECK_ERROR hipDeviceSynchronize (); hipMemcpy( imgData, d_imgData, pixelCount * sizeof( Vector3 ), hipMemcpyDeviceToHost ); CHECK_ERROR hipFree( d_imgData ); hipFree( d_spheres ); hipFree( d_lights ); writeP3( "output.ppm", imgData, width, height ); delete [] imgData; }
9bdba506d4f9a318a916b479c7ec249b8d895a61.cu
#include "wb.h" #define SQR(x) ((x)*(x)) #define SQR5(x) ((x)*(x)*(x)*(x)*(x)) #include "cuda.def.cuh" #include "vector3.h" #include <fstream> #include <cmath> class Ray { public: Vector3 origin; Vector3 direction; GCPU_F Ray( const Vector3 & origin, const Vector3 & direction ) : origin( origin ), direction( direction ) {} GCPU_F Ray( const Vector3 & direction ) : Ray( 0, direction ) {} GCPU_F Ray( ) : Ray( Vector3(1, 0, 0) ) {} }; class Material { public: Vector3 albedo; float roughness = 0; float metalness = 0; }; class Sphere { public: float size = 1; Vector3 center; Material material; GCPU_F Sphere() : size(1), center( 0.0 ){} GCPU_F Sphere( Sphere & sphere ) : size( sphere.size ), center( sphere.center ), material( sphere.material ) {} GCPU_F Vector3 normalAt( const Vector3 & hit ) const { return (hit - center).normalize(); } GCPU_F bool intersects( const Vector3 & origin, const Vector3 & direction, Vector3 & hit, float & distance ) const { auto m = origin - center; float b = Vector3::dotProduct( m, direction ); float c = m.length2() - SQR( size ); if( c > 0.0f && b > 0.0f ) return false; float discr = SQR( b ) - c; if( discr < 0.0f ) return false; auto t = -b - sqrt( discr ); if( t < 0.0f ) t = -b + sqrt( discr ); hit = origin + direction * t; distance = t; return true; } friend std::ostream & operator << ( std::ostream & os, const Sphere & sphere ) { os << "Sphere: radius: " << sphere.size << " center: " << sphere.center << " color: " << sphere.material.albedo; return os; } }; class Light { public: Vector3 origin; Vector3 color = 1; float intensity = 1; Light() {} }; class Collision { public: Sphere * sphere = nullptr; Vector3 normal; Vector3 hit; float distance = 2 * 1e38; }; GCPU_F bool getFirstCollision( const Ray & ray, Sphere * spheres, const int sphereCount, Collision & collision ) { collision.distance = 2e38; collision.sphere = nullptr; auto collided = false; for( unsigned int i = 0; i < sphereCount; i++ ) { Vector3 hit; float distance; if( !spheres[i].intersects( ray.origin, ray.direction, hit, distance ) ) continue; if( collision.distance < distance ) continue; collision.distance = distance; collision.hit = hit; collision.normal = spheres[i].normalAt( hit ); collision.sphere = &spheres[i]; collided = true; } return collided; } GCPU_F float GGXDistribution( float alpha2, float NdotH ) { return alpha2 / (M_PI * SQR( NdotH ) * SQR((alpha2 - 1) + 1) ); } GCPU_F float GGXGeometry( float alpha2, float NdotL ) { return 2 * NdotL / (NdotL + sqrt(alpha2 + (1-alpha2) * SQR(NdotL) )); } GCPU_F Vector3 Fresnel( const Vector3 & f0, const float NdotH ) { auto unary = Vector3(1); return f0 + (unary - f0) * (SQR5(unary - NdotH)); } GCPU_F float dodgeZero( float value ) { if( value < 0 ) value = 0; if( value < 0.001 ) value = 0.001; return value; } __global__ void raytrace( Vector3 * image, Sphere * spheres, Light * lights, int width, int height, int sphereCount, int lightCount ) { int idx = threadIdx.x + threadIdx.y * blockDim.y + blockIdx.x * blockDim.x * blockDim.y; if( width * height <= idx ) return; //printf("%d\n", threadIdx.x + threadIdx.y * blockDim.x); int i = idx % width; int j = height - idx / height; //printf("%d %d %d\n", i, j, idx ); //printf("-- %d %d %d\n", spheres[0].center.r, spheres[0].center.g, spheres[0].center.b ); Vector3 pos = Vector3( i, j, 0 ); Ray ray; ray.origin = Vector3( i, j, -100 ); ray.direction = Vector3( 0, 0, 1 ); image[ idx ] = Vector3( 1 - (j / (float)height) ); Collision collision; if( !getFirstCollision( ray, spheres, sphereCount, collision ) ) 
return; //image[ idx ] = collision.sphere->color; auto material = collision.sphere->material; auto albedo = material.albedo; auto rough = material.roughness * 0.97 + 0.03; auto metal = material.metalness; auto f0 = albedo * (0.04 * (1-metal) + metal); auto alpha = SQR( rough ); auto alpha2 = SQR( alpha ); image[ idx ] = 0; for( unsigned int i = 0; i < lightCount; i++ ) { auto & light = lights[i]; auto color = light.color * light.intensity; auto L = (light.origin - collision.hit).normalize(); auto N = collision.normal; auto V = -ray.direction.normalized(); auto H = (L + V).normalize(); auto NdotL = Vector3::dotProduct( N, L ); if( NdotL < 0 ) continue; auto NdotV = dodgeZero( Vector3::dotProduct( N, V ) ); auto NdotH = dodgeZero( Vector3::dotProduct( N, H ) ); auto D = GGXDistribution( alpha2, NdotL ); auto G = GGXGeometry( alpha2, NdotL ) * GGXGeometry( alpha2, NdotV ); auto F = Fresnel( f0, NdotH ); auto denom = 4 * NdotL * NdotV; auto spec = F * G * D / denom; auto nonspec = (Vector3(1) - F); color *= albedo * (spec + nonspec) / M_PI * NdotL; image[idx] += color; } } void writeP3( std::string fileName, Vector3 * data, int width, int height ) { std::ofstream file; file.open( fileName ); file << "P3\n" << width << " " << height << "\n" << 255 << std::endl; for( auto idx = 0; idx < width * height; idx++ ) { auto d = data[ idx ]; d.clamp( 0, 1 ); std::cout << d * 255 << std::endl; file << (int)(d.r * 255) << " " << (int)(d.g * 255) << " " << (int)(d.b * 255) << " "; } file << std::flush; } class Scene { public: Light * lights; int lightCount = 0; Sphere * spheres; int sphereCount = 0; GCPU_F Scene() {} GCPU_F ~Scene() { if( sphereCount != 0 ) delete [] spheres; if( lightCount != 0 ) delete [] lights; } Scene & addSphere( const Sphere & sphere ) { Sphere * spheres = new Sphere[ sphereCount + 1 ]; for( unsigned int i = 0; i < sphereCount; i++ ) { spheres[i] = this->spheres[i]; } spheres[sphereCount] = sphere; if( sphereCount ) { delete [] this->spheres; } this->spheres = spheres; sphereCount++; return *this; } Scene & addLight( const Light & light ) { Light * lights = new Light[ lightCount + 1 ]; for( unsigned int i = 0; i < lightCount; i++ ) { lights[i] = this->lights[i]; } lights[lightCount] = light; if( lightCount ) { delete [] this->lights; } this->lights = lights; lightCount++; return *this; } }; void populateScene( Scene & scene ) { auto steps = 5; auto stepW = 300 / (steps + 2); auto stepH = 300 / (steps + 2); for( unsigned int i = 0; i <= steps; i++ ) { for( unsigned int j = 0; j <= steps; j++ ) { Sphere sphere; sphere.center = Vector3( stepW * (i + 1), stepH * (j + 1), 0 ); sphere.size = min( stepW, stepH ) / 2.5f; sphere.material.albedo = Vector3(1, 0, 0.5); sphere.material.roughness = (float)i/(float)steps; sphere.material.metalness = (float)j/(float)steps; scene.addSphere( sphere ); } } // Sphere sphere1; // sphere1.center = Vector3( 140, 130, 0 ); // sphere1.size = 100; // sphere1.material.albedo = Vector3( 1, 0, 0 ); // Sphere sphere2; // sphere2.center = Vector3( 210, 230, 0 ); // sphere2.size = 20; // sphere2.material.albedo = Vector3( 0, 0, 1 ); // scene.addSphere( sphere1 ); // scene.addSphere( sphere2 ); Light light1; light1.origin = Vector3( 0, 10000, -10000 ); scene.addLight( light1 ); } int main(int argc, char *argv[]) { cudaDeviceReset(); Vector3 * imgData; Vector3 * d_imgData; Sphere * d_spheres; Light * d_lights; int width = 300; int height = 300; auto pixelCount = width * height; imgData = new Vector3[ pixelCount ]; Scene scene; populateScene( scene ); cudaMalloc( 
(void **) &d_imgData, pixelCount * sizeof( Vector3 ) ); cudaMalloc( (void **) &d_spheres, scene.sphereCount * sizeof( Sphere ) ); cudaMalloc( (void **) &d_lights, scene.lightCount * sizeof( Light ) ); cudaMemcpy( d_spheres, scene.spheres, scene.sphereCount * sizeof( Sphere ), cudaMemcpyHostToDevice ); cudaMemcpy( d_lights, scene.lights, scene.lightCount * sizeof( Light ), cudaMemcpyHostToDevice ); dim3 threaddim = dim3( 32, 32, 1 ); auto totalBlockCount = ceil( pixelCount / (float)(threaddim.x * threaddim.y) ); dim3 blockdim = dim3( totalBlockCount, 1, 1 ); raytrace<<< blockdim, threaddim >>>( d_imgData, d_spheres, d_lights, width, height, scene.sphereCount, scene.lightCount ); CHECK_ERROR cudaDeviceSynchronize (); cudaMemcpy( imgData, d_imgData, pixelCount * sizeof( Vector3 ), cudaMemcpyDeviceToHost ); CHECK_ERROR cudaFree( d_imgData ); cudaFree( d_spheres ); cudaFree( d_lights ); writeP3( "output.ppm", imgData, width, height ); delete [] imgData; }
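The flattened index in raytrace() relies on the block being square (threadIdx.y * blockDim.y only matches the usual threadIdx.y * blockDim.x because the block is 32x32) and on width == height (idx / height only recovers the row for square images). A hedged, shape-agnostic variant of the same mapping, with flatPixelIndex as an illustrative name:

__device__ inline int flatPixelIndex() {
  // Linearize threads within the block using blockDim.x as the row stride,
  // then offset by whole blocks of blockDim.x * blockDim.y threads.
  return blockIdx.x * (blockDim.x * blockDim.y)
       + threadIdx.y * blockDim.x + threadIdx.x;
}

// Inside a one-thread-per-pixel kernel it would be used as:
//   int idx = flatPixelIndex();
//   if (idx >= width * height) return;
//   int i = idx % width;               // column
//   int j = height - 1 - idx / width;  // row, zero-based vertical flip
// (the original uses j = height - idx / height, which is valid only
//  when width == height)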
4994c3378ba203a8e7a3d8378883c1b03d9906a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Student: Petraglia Mariangela 0522500473 */ #include<cuda.h> #include<stdio.h> //host functions void initializeArray(int*,int); void stampaArray(int*, int); void equalArray(int*, int*, int); void prodottoArrayCompPerCompCPU(int *, int *, int *, int); //kernel functions __global__ void dotProdGPU(int *, int *, int *, int); __global__ void reduce2(int *, int *, int *, int); __global__ void reduce3(int *, int *, int *, int); int main(int argn, char * argv[]){ //number of blocks and number of threads per block dim3 gridDim, blockDim; int N; //total number of elements in the array //arrays stored on the host int *A_host, *B_host, *C_host; //arrays stored on the device int *A_device, *B_device, *C_device; int *copy, *shared; //arrays into which we will copy the results of C_device int size; //size in bytes of each array int sumC_host, sumC_device, i, sumReduce = 0; int SM = 1536; //max number of blocks 8 int threadEffettiviSM = 0; int blocResidentiSM = 0; int num = 8; int flag; hipEvent_t start, stop; float elapsed; printf("***\t COMPONENT-WISE PRODUCT OF TWO ARRAYS \t***\n"); if(argn<4){ printf("Insufficient number of parameters!!!\n"); printf("Correct usage: %s <NumElements> <NumThreadsPerBlock> <PrintFlag>\n",argv[0]); printf("Using the default values\n"); N = 128; flag = 0; } else{ N = atoi(argv[1]); num = atoi(argv[2]); flag = atoi(argv[3]); } blockDim.x = num; //exact determination of the number of blocks - if the division has a remainder we must add one extra block -> load balancing gridDim = N / blockDim.x + ((N % blockDim.x) == 0 ? 0:1); //size in bytes of each array size = N*sizeof(int); blocResidentiSM = SM / blockDim.x; //print info about the kernel execution printf("Array size N = %d \n", N); printf("Number of threads per block = %d\n", blockDim.x); printf("Number of blocks = %d\n", gridDim.x); printf("Total number of resident blocks per SM = %d \n", blocResidentiSM); printf("Total number of SMs used = %d \n", blocResidentiSM/8); threadEffettiviSM = blockDim.x * 8; if(threadEffettiviSM == SM) printf("Optimal use of the SMs \n"); else printf("Only %d of %d threads used per SM \n", threadEffettiviSM, SM); //data allocation on the host A_host=(int*)malloc(size); B_host=(int*)malloc(size); C_host=(int*)malloc(size); copy=(int*)malloc(size); //array into which we will copy the results of C_device //data allocation on the device hipMalloc((void**)&A_device,size); hipMalloc((void**)&B_device,size); hipMalloc((void**)&C_device,size); //data initialization on the host initializeArray(A_host, N); initializeArray(B_host, N); //copy the data from host to device hipMemcpy(A_device, A_host, size, hipMemcpyHostToDevice); hipMemcpy(B_device, B_host, size, hipMemcpyHostToDevice); //zero out the contents of vector C memset(C_host, 0, size); //set the C_host array to 0 hipMemset(C_device, 0, size); //set the C_device array to 0 //***STRATEGY 1***// // computation on the CPU hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); elapsed = 0; //call the serial function for the product of two arrays prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N); hipEventRecord(stop); hipEventSynchronize(stop); // ensures everyone has reached the stop event before recording the time hipEventElapsedTime(&elapsed, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("CPU time=%f\n", elapsed); hipEventCreate(&start); hipEventCreate(&stop);
hipEventRecord(start); //invocazione del kernel hipLaunchKernelGGL(( dotProdGPU), dim3(gridDim), dim3(blockDim), 0, 0, C_device, A_device, B_device, N); //STRATEGIA 1 hipEventRecord(stop); hipEventSynchronize(stop); // assicura che tutti siano arrivati all'evento stop prima di registrare il tempo hipEventElapsedTime(&elapsed, start, stop);// tempo tra i due eventi in millisecondi hipEventDestroy(start); hipEventDestroy(stop); //copia dei risultati dal device all'host hipMemcpy(copy, C_device, size, hipMemcpyDeviceToHost); //test di correttezza: verifichiamo che le due somme corrispondano sumC_host = 0; sumC_device = 0; for(i=0; i<N; i++){ sumC_host += C_host[i]; sumC_device += copy[i]; } if(sumC_host==sumC_device) printf("Le somme coincidono: host (%d) - device (%d) \n", sumC_host, sumC_device); else printf("Le somme NON coincidono: host (%d) - device (%d) \n", sumC_host, sumC_device); printf("Tempo GPU I strategia = %f\n", elapsed); //*** STRATEGIA 2 - shared memory ***// shared = (int*) calloc (N, sizeof(int));//vettore somme parziali hipFree(C_device); hipMalloc((void **)&C_device, gridDim.x*sizeof(int)); //C_Device deve avere un numero di elementi pari al numero di blocchi hipMemset(C_device, 0, size); //setta a 0 l'array C_device hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //invocazione del kernel hipLaunchKernelGGL(( reduce2), dim3(gridDim), dim3(blockDim), blockDim.x * sizeof(int), 0, C_device, A_device, B_device, N); hipEventRecord(stop); hipEventSynchronize(stop); //assicura che tutti siano arrivati all'evento stop prima di registrare il tempo elapsed = 0; hipEventElapsedTime(&elapsed, start, stop); // tempo tra i due eventi in millisecondi hipEventDestroy(start); hipEventDestroy(stop); //copia dei risultati dal device all'host hipMemcpy(shared, C_device, gridDim.x*sizeof(int), hipMemcpyDeviceToHost); sumReduce = 0; for(i=0; i<gridDim.x; i++) sumReduce+=shared[i]; if(sumC_host==sumReduce) printf("Le somme coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce); else printf("Le somme NON coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce); printf("Tempo GPU II strategia = %f\n", elapsed); //***STRATEGIA 3 - shared memory per evitare divergenza e conflitti di accesso ai banchi del shared mem ***// memset(shared, 0, size); hipMemset(C_device, 0, size); //setta a 0 l'array C_device hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); //invocazione del kernel hipLaunchKernelGGL(( reduce3), dim3(gridDim), dim3(blockDim), blockDim.x * sizeof(int), 0, C_device, A_device, B_device, N); hipEventRecord(stop); hipEventSynchronize(stop); //assicura che tutti siano arrivati all'evento stop prima di registrare il tempo elapsed = 0; hipEventElapsedTime(&elapsed, start, stop);// tempo tra i due eventi in millisecondi hipEventDestroy(start); hipEventDestroy(stop); //copia dei risultati dal device all'host hipMemcpy(shared, C_device, gridDim.x*sizeof(int), hipMemcpyDeviceToHost); sumReduce = 0; for(i=0; i<gridDim.x; i++) sumReduce+=shared[i]; if(sumC_host==sumReduce) printf("Le somme coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce); else printf("Le somme NON coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce); printf("Tempo GPU III strategia = %f\n", elapsed); //de-allocazione host free(A_host); free(B_host); free(C_host); free(copy); free(shared); //de-allocazione device hipFree(A_device); hipFree(B_device); hipFree(C_device); exit(0); } void initializeArray(int *array, int n){ int i; for(i=0;i<n;i++) array[i] = 
rand() % 5; } void stampaArray(int* array, int n){ int i; for(i=0;i<n;i++) printf("%d ", array[i]); printf("\n"); } void equalArray(int* a, int*b, int n){ int i=0; while(a[i]==b[i]) i++; if(i<n) printf("I risultati dell'host e del device sono diversi \n"); else printf("I risultati dell'host e del device coincidono \n"); } //Seriale void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int N){ int i; for(i=0;i<N;i++) c[i]=a[i]*b[i]; } //Parallelo __global__ void dotProdGPU(int *c, int *a, int *b, int N){ // global index int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < N) c[index] = a[index]*b[index]; } __global__ void reduce2(int *c, int *a, int *b, int N){ extern __shared__ int sdata[]; // global index int index = blockDim.x * blockIdx.x + threadIdx.x; //calcolo prodotto if (index < N) sdata[threadIdx.x] = a[index]*b[index]; __syncthreads(); //do reduction in shared mem for (unsigned int s = 1; s < blockDim.x; s *= 2){ // step = x*2 if(threadIdx.x % (2*s) == 0) { // only threadIDs divisible by step participate sdata[threadIdx.x] += sdata[threadIdx.x + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0) c[blockIdx.x] = sdata[threadIdx.x]; } __global__ void reduce3(int *c, int *a, int *b, int N){ extern __shared__ int sdata[]; // global index int index = blockDim.x * blockIdx.x + threadIdx.x; //calcolo prodotto if (index < N) sdata[threadIdx.x] = a[index]*b[index]; __syncthreads(); // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s>0; s >>= 1){ // s la distanza // s = s/2 if(threadIdx.x < s) { sdata[threadIdx.x] += sdata[threadIdx.x + s]; } __syncthreads(); } // writeresultfor this block to global mem if (threadIdx.x == 0) c[blockIdx.x] = sdata[threadIdx.x]; }
4994c3378ba203a8e7a3d8378883c1b03d9906a7.cu
/*
 * Student: Petraglia Mariangela 0522500473
 */
#include<cuda.h>
#include<stdio.h>
#include<stdlib.h>

//host functions
void initializeArray(int*,int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int *, int *, int *, int);
//kernel functions
__global__ void dotProdGPU(int *, int *, int *, int);
__global__ void reduce2(int *, int *, int *, int);
__global__ void reduce3(int *, int *, int *, int);

int main(int argn, char * argv[]){
    //number of blocks and number of threads per block
    dim3 gridDim, blockDim;
    int N; //total number of elements in the array
    //arrays stored on the host
    int *A_host, *B_host, *C_host;
    //arrays stored on the device
    int *A_device, *B_device, *C_device;
    int *copy, *shared; //arrays into which the results of C_device are copied
    int size; //size in bytes of each array
    int sumC_host, sumC_device, i, sumReduce = 0;
    int SM = 1536; //max number of blocks per SM is 8
    int threadEffettiviSM = 0;
    int blocResidentiSM = 0;
    int num = 8;
    int flag;
    cudaEvent_t start, stop;
    float elapsed;

    printf("***\t COMPONENT-WISE PRODUCT OF TWO ARRAYS \t***\n");
    if(argn<4){
        printf("Not enough parameters!!!\n");
        printf("Correct usage: %s <NumElements> <NumThreadsPerBlock> <print flag>\n",argv[0]);
        printf("Using default values\n");
        N = 128;
        flag = 0;
    }
    else{
        N = atoi(argv[1]);
        num = atoi(argv[2]);
        flag = atoi(argv[3]);
    }
    blockDim.x = num;
    //exact determination of the number of blocks - if the division has a remainder we must add one extra block -> load balancing
    gridDim = N / blockDim.x + ((N % blockDim.x) == 0 ? 0:1);
    //size in bytes of each array
    size = N*sizeof(int);
    blocResidentiSM = SM / blockDim.x;
    //print info about the kernel execution
    printf("Array size N = %d \n", N);
    printf("Number of threads per block = %d\n", blockDim.x);
    printf("Number of blocks = %d\n", gridDim.x);
    printf("Total number of resident blocks per SM = %d \n", blocResidentiSM);
    printf("Total number of SMs used = %d \n", blocResidentiSM/8);
    threadEffettiviSM = blockDim.x * 8;
    if(threadEffettiviSM == SM)
        printf("Optimal use of the SMs \n");
    else
        printf("Only %d threads out of %d used on each SM \n", threadEffettiviSM, SM);

    //allocate data on the host
    A_host=(int*)malloc(size);
    B_host=(int*)malloc(size);
    C_host=(int*)malloc(size);
    copy=(int*)malloc(size); //array into which the results of C_device are copied
    //allocate data on the device
    cudaMalloc((void**)&A_device,size);
    cudaMalloc((void**)&B_device,size);
    cudaMalloc((void**)&C_device,size);
    //initialize data on the host
    initializeArray(A_host, N);
    initializeArray(B_host, N);
    //copy the data from the host to the device
    cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);
    //zero out the contents of vector C
    memset(C_host, 0, size); //set the C_host array to 0
    cudaMemset(C_device, 0, size); //set the C_device array to 0

    //***STRATEGY 1***//
    // computation on the CPU
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    elapsed = 0;
    //call the serial function for the component-wise product of two arrays
    prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // make sure the stop event has been reached before reading the time
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("CPU time=%f\n", elapsed);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    //kernel invocation
    dotProdGPU<<<gridDim, blockDim>>>(C_device, A_device, B_device, N); //STRATEGY 1
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // make sure the stop event has been reached before reading the time
    cudaEventElapsedTime(&elapsed, start, stop); // time between the two events in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //copy the results from the device back to the host
    cudaMemcpy(copy, C_device, size, cudaMemcpyDeviceToHost);
    //correctness test: check that the two sums match
    sumC_host = 0;
    sumC_device = 0;
    for(i=0; i<N; i++){
        sumC_host += C_host[i];
        sumC_device += copy[i];
    }
    if(sumC_host==sumC_device)
        printf("The sums match: host (%d) - device (%d) \n", sumC_host, sumC_device);
    else
        printf("The sums do NOT match: host (%d) - device (%d) \n", sumC_host, sumC_device);
    printf("GPU time, strategy I = %f\n", elapsed);

    //*** STRATEGY 2 - shared memory ***//
    shared = (int*) calloc (N, sizeof(int)); //vector of per-block partial sums
    cudaFree(C_device);
    cudaMalloc((void **)&C_device, gridDim.x*sizeof(int)); //C_device must have as many elements as there are blocks
    cudaMemset(C_device, 0, gridDim.x*sizeof(int)); //set the C_device array to 0 (only gridDim.x elements are allocated now)
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    //kernel invocation
    reduce2<<<gridDim, blockDim, blockDim.x * sizeof(int)>>>(C_device, A_device, B_device, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // make sure the stop event has been reached before reading the time
    elapsed = 0;
    cudaEventElapsedTime(&elapsed, start, stop); // time between the two events in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //copy the results from the device back to the host
    cudaMemcpy(shared, C_device, gridDim.x*sizeof(int), cudaMemcpyDeviceToHost);
    sumReduce = 0;
    for(i=0; i<gridDim.x; i++)
        sumReduce+=shared[i];
    if(sumC_host==sumReduce)
        printf("The sums match: host (%d) - device (%d) \n", sumC_host, sumReduce);
    else
        printf("The sums do NOT match: host (%d) - device (%d) \n", sumC_host, sumReduce);
    printf("GPU time, strategy II = %f\n", elapsed);

    //***STRATEGY 3 - shared memory, avoiding divergence and shared-memory bank conflicts ***//
    memset(shared, 0, size);
    cudaMemset(C_device, 0, gridDim.x*sizeof(int)); //set the C_device array to 0 (only gridDim.x elements are allocated)
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    //kernel invocation
    reduce3<<<gridDim, blockDim, blockDim.x * sizeof(int)>>>(C_device, A_device, B_device, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // make sure the stop event has been reached before reading the time
    elapsed = 0;
    cudaEventElapsedTime(&elapsed, start, stop); // time between the two events in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //copy the results from the device back to the host
    cudaMemcpy(shared, C_device, gridDim.x*sizeof(int), cudaMemcpyDeviceToHost);
    sumReduce = 0;
    for(i=0; i<gridDim.x; i++)
        sumReduce+=shared[i];
    if(sumC_host==sumReduce)
        printf("The sums match: host (%d) - device (%d) \n", sumC_host, sumReduce);
    else
        printf("The sums do NOT match: host (%d) - device (%d) \n", sumC_host, sumReduce);
    printf("GPU time, strategy III = %f\n", elapsed);

    //host deallocation
    free(A_host); free(B_host); free(C_host); free(copy); free(shared);
    //device deallocation
    cudaFree(A_device); cudaFree(B_device); cudaFree(C_device);
    exit(0);
}

void initializeArray(int *array, int n){
    int i;
    for(i=0;i<n;i++)
        array[i] = rand() % 5;
}

void stampaArray(int* array, int n){
    int i;
    for(i=0;i<n;i++)
        printf("%d ", array[i]);
    printf("\n");
}

void equalArray(int* a, int*b, int n){
    int i=0;
    while(i<n && a[i]==b[i]) //stop at n so we never read past the end of the arrays
        i++;
    if(i<n)
        printf("The host and device results differ \n");
    else
        printf("The host and device results match \n");
}

//Serial
void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int N){
    int i;
    for(i=0;i<N;i++)
        c[i]=a[i]*b[i];
}

//Parallel
__global__ void dotProdGPU(int *c, int *a, int *b, int N){
    // global index
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < N)
        c[index] = a[index]*b[index];
}

__global__ void reduce2(int *c, int *a, int *b, int N){
    extern __shared__ int sdata[];
    // global index
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    //compute the product; threads past the end of the array contribute 0 so sdata is never read uninitialized
    sdata[threadIdx.x] = (index < N) ? a[index]*b[index] : 0;
    __syncthreads();
    //do reduction in shared mem
    for (unsigned int s = 1; s < blockDim.x; s *= 2){ // the step doubles each iteration
        if(threadIdx.x % (2*s) == 0) { // only threadIDs divisible by the step participate
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        }
        __syncthreads();
    }
    // write the result for this block to global mem
    if (threadIdx.x == 0) c[blockIdx.x] = sdata[threadIdx.x];
}

__global__ void reduce3(int *c, int *a, int *b, int N){
    extern __shared__ int sdata[];
    // global index
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    //compute the product; threads past the end of the array contribute 0 so sdata is never read uninitialized
    sdata[threadIdx.x] = (index < N) ? a[index]*b[index] : 0;
    __syncthreads();
    // do reduction in shared mem (assumes blockDim.x is a power of two)
    for (unsigned int s = blockDim.x / 2; s>0; s >>= 1){ // s is the distance between paired elements, halved each step
        if(threadIdx.x < s) {
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        }
        __syncthreads();
    }
    // write the result for this block to global mem
    if (threadIdx.x == 0) c[blockIdx.x] = sdata[threadIdx.x];
}
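A note on the two reduction kernels above: on CUDA 9 and later, the last warp of reduce3 can be finished with warp shuffles instead of shared memory, saving the final __syncthreads() rounds. The sketch below is a hedged illustration, not part of the original assignment; the kernel name reduce3_shfl is invented, and it assumes blockDim.x is a power of two of at least 64. Launch it exactly like reduce3 above, with blockDim.x * sizeof(int) bytes of dynamic shared memory.

__global__ void reduce3_shfl(int *c, const int *a, const int *b, int N){
    extern __shared__ int sdata[];
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    sdata[threadIdx.x] = (index < N) ? a[index]*b[index] : 0;
    __syncthreads();
    // shared-memory tree, stopping once 64 partial sums remain
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1){
        if (threadIdx.x < s) sdata[threadIdx.x] += sdata[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x < 32){
        int v = sdata[threadIdx.x] + sdata[threadIdx.x + 32]; // requires blockDim.x >= 64
        for (int offset = 16; offset > 0; offset >>= 1)
            v += __shfl_down_sync(0xffffffff, v, offset); // CUDA 9+ warp shuffle
        if (threadIdx.x == 0) c[blockIdx.x] = v;
    }
}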
34e59b22ef6fa6f81d4cd33ce87afb6b12b3e3c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author sgazeos@gmail.com // #include <array/NDArrayFactory.h> #include <ops/declarable/helpers/fake_quantization.h> namespace sd { namespace ops { namespace helpers { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // fakeQuantWithMinMaxVars_ // input - input tensor // min - min scalar tensor // max - max scalar tensor // numBits - (default 16bit) // narrowed - shrink is true // output - output tensor // template <typename T> static SD_HOST_DEVICE void nudge(T min, T max, int quantMin, int quantMax, T* scale, T* nudgedMin, T* nudgedMax) { T quantMaxF = static_cast<T>(quantMax); T quantMinF = static_cast<T>(quantMin); *scale = (max - min) / (quantMaxF - quantMinF); auto zeroPointFromMin = quantMinF - min / *scale; uint16_t const nudgedZeroPoint = [zeroPointFromMin, quantMin, quantMax, quantMaxF, quantMinF] { if (zeroPointFromMin < quantMinF) { return static_cast<uint16_t>(quantMin); } if (zeroPointFromMin > quantMaxF) { return static_cast<uint16_t>(quantMax); } return sd::math::sd_round<T, uint16_t>(zeroPointFromMin); }(); *nudgedMax = (quantMaxF - static_cast<T>(nudgedZeroPoint)) * (*scale); *nudgedMin = (quantMinF - static_cast<T>(nudgedZeroPoint)) * (*scale); } template <typename T> void fakeQuantWithMinMaxVars_(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { int lowIntBound = narrowed ? 
1 : 0; int upperIntBound = (1 << numBits) - 1; min->syncToHost(); // these are scalars, so nothing much happened max->syncToHost(); T scale, nudgedMin, nudgedMax; nudge(min->t<T>(0), max->t<T>(0), lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax); auto wiseMinMaxAndSoOn = LAMBDA_T(x, nudgedMin, nudgedMax, scale) { T val = x; if (x < nudgedMin) { val = nudgedMin; } else if (x > nudgedMax) { val = nudgedMax; } else val = x; return (math::sd_floor<T, T>((val - nudgedMin) / scale + T(0.5)) * scale + nudgedMin); }; input->applyLambda(wiseMinMaxAndSoOn, *output); } template <typename T> static SD_KERNEL void fakeQuantWithMinMaxKernel(const T* input, const sd::LongType* inputShape, T* min, T* max, int lowIntBound, int upperIntBound, sd::LongType channels, T* output, const sd::LongType* outputShape, sd::LongType length) { __shared__ int block; if (threadIdx.x == 0) { block = length / channels; // to loop with last dimension as block } __syncthreads(); for (auto i = blockIdx.x; i < (int)channels; i += gridDim.x) { T scale, nudgedMin, nudgedMax; nudge(min[i], max[i], lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax); // loop over blocks to quantization between nudged min and max for (auto b = threadIdx.x; b < block; b += blockDim.x) { T val = input[shape::getIndexOffset(b * channels + i, inputShape)]; if (val < nudgedMin) { val = nudgedMin; } else if (val > nudgedMax) { val = nudgedMax; } output[shape::getIndexOffset(b * channels + i, outputShape)] = (math::sd_floor<T, T>((val - nudgedMin) / scale + T(0.5f)) * scale + nudgedMin); }; } } template <typename T> void fakeQuantWithMinMaxVarsPerChannel_(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { int lowIntBound = narrowed ? 1 : 0; int upperIntBound = (1 << numBits) - 1; auto channels = min->lengthOf(); auto length = input->lengthOf(); NDArray::prepareSpecialUse({output}, {min, max, input}); auto stream = context->getCudaStream(); T* inputBuf = input->dataBuffer()->specialAsT<T>(); T* outputBuf = output->dataBuffer()->specialAsT<T>(); T* minBuf = min->dataBuffer()->specialAsT<T>(); T* maxBuf = max->dataBuffer()->specialAsT<T>(); hipLaunchKernelGGL(( fakeQuantWithMinMaxKernel), dim3(128), dim3(256), 256, *stream, inputBuf, input->specialShapeInfo(), minBuf, maxBuf, lowIntBound, upperIntBound, channels, outputBuf, output->specialShapeInfo(), length); NDArray::registerSpecialUse({output}, {min, max, input}); } void fakeQuantWithMinMaxVars(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVars_, (input, min, max, numBits, narrowed, output), SD_FLOAT_TYPES); } void fakeQuantWithMinMaxVarsPerChannel(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVarsPerChannel_, (context, input, min, max, numBits, narrowed, output), SD_FLOAT_TYPES); } } // namespace helpers } // namespace ops } // namespace sd
34e59b22ef6fa6f81d4cd33ce87afb6b12b3e3c8.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author sgazeos@gmail.com // #include <array/NDArrayFactory.h> #include <ops/declarable/helpers/fake_quantization.h> namespace sd { namespace ops { namespace helpers { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // fakeQuantWithMinMaxVars_ // input - input tensor // min - min scalar tensor // max - max scalar tensor // numBits - (default 16bit) // narrowed - shrink is true // output - output tensor // template <typename T> static SD_HOST_DEVICE void nudge(T min, T max, int quantMin, int quantMax, T* scale, T* nudgedMin, T* nudgedMax) { T quantMaxF = static_cast<T>(quantMax); T quantMinF = static_cast<T>(quantMin); *scale = (max - min) / (quantMaxF - quantMinF); auto zeroPointFromMin = quantMinF - min / *scale; uint16_t const nudgedZeroPoint = [zeroPointFromMin, quantMin, quantMax, quantMaxF, quantMinF] { if (zeroPointFromMin < quantMinF) { return static_cast<uint16_t>(quantMin); } if (zeroPointFromMin > quantMaxF) { return static_cast<uint16_t>(quantMax); } return sd::math::sd_round<T, uint16_t>(zeroPointFromMin); }(); *nudgedMax = (quantMaxF - static_cast<T>(nudgedZeroPoint)) * (*scale); *nudgedMin = (quantMinF - static_cast<T>(nudgedZeroPoint)) * (*scale); } template <typename T> void fakeQuantWithMinMaxVars_(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { int lowIntBound = narrowed ? 
1 : 0; int upperIntBound = (1 << numBits) - 1; min->syncToHost(); // these are scalars, so nothing much happened max->syncToHost(); T scale, nudgedMin, nudgedMax; nudge(min->t<T>(0), max->t<T>(0), lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax); auto wiseMinMaxAndSoOn = LAMBDA_T(x, nudgedMin, nudgedMax, scale) { T val = x; if (x < nudgedMin) { val = nudgedMin; } else if (x > nudgedMax) { val = nudgedMax; } else val = x; return (math::sd_floor<T, T>((val - nudgedMin) / scale + T(0.5)) * scale + nudgedMin); }; input->applyLambda(wiseMinMaxAndSoOn, *output); } template <typename T> static SD_KERNEL void fakeQuantWithMinMaxKernel(const T* input, const sd::LongType* inputShape, T* min, T* max, int lowIntBound, int upperIntBound, sd::LongType channels, T* output, const sd::LongType* outputShape, sd::LongType length) { __shared__ int block; if (threadIdx.x == 0) { block = length / channels; // to loop with last dimension as block } __syncthreads(); for (auto i = blockIdx.x; i < (int)channels; i += gridDim.x) { T scale, nudgedMin, nudgedMax; nudge(min[i], max[i], lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax); // loop over blocks to quantization between nudged min and max for (auto b = threadIdx.x; b < block; b += blockDim.x) { T val = input[shape::getIndexOffset(b * channels + i, inputShape)]; if (val < nudgedMin) { val = nudgedMin; } else if (val > nudgedMax) { val = nudgedMax; } output[shape::getIndexOffset(b * channels + i, outputShape)] = (math::sd_floor<T, T>((val - nudgedMin) / scale + T(0.5f)) * scale + nudgedMin); }; } } template <typename T> void fakeQuantWithMinMaxVarsPerChannel_(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { int lowIntBound = narrowed ? 1 : 0; int upperIntBound = (1 << numBits) - 1; auto channels = min->lengthOf(); auto length = input->lengthOf(); NDArray::prepareSpecialUse({output}, {min, max, input}); auto stream = context->getCudaStream(); T* inputBuf = input->dataBuffer()->specialAsT<T>(); T* outputBuf = output->dataBuffer()->specialAsT<T>(); T* minBuf = min->dataBuffer()->specialAsT<T>(); T* maxBuf = max->dataBuffer()->specialAsT<T>(); fakeQuantWithMinMaxKernel<<<128, 256, 256, *stream>>>(inputBuf, input->specialShapeInfo(), minBuf, maxBuf, lowIntBound, upperIntBound, channels, outputBuf, output->specialShapeInfo(), length); NDArray::registerSpecialUse({output}, {min, max, input}); } void fakeQuantWithMinMaxVars(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVars_, (input, min, max, numBits, narrowed, output), SD_FLOAT_TYPES); } void fakeQuantWithMinMaxVarsPerChannel(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVarsPerChannel_, (context, input, min, max, numBits, narrowed, output), SD_FLOAT_TYPES); } } // namespace helpers } // namespace ops } // namespace sd
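To see what the nudge() routine in the two files above actually computes, here is a small standalone host-side reproduction of the same formulas for float (my own sketch, not library code): it shifts [min, max] so that an integer zero point lands exactly on the quantization grid.

// Standalone reproduction of the nudge() math for float; values are illustrative.
#include <cmath>
#include <cstdio>

static void nudge(float min, float max, int quantMin, int quantMax,
                  float *scale, float *nudgedMin, float *nudgedMax) {
    float quantMaxF = (float)quantMax, quantMinF = (float)quantMin;
    *scale = (max - min) / (quantMaxF - quantMinF);
    float zeroPointFromMin = quantMinF - min / *scale;
    unsigned short nudgedZeroPoint =
        zeroPointFromMin < quantMinF ? (unsigned short)quantMin :
        zeroPointFromMin > quantMaxF ? (unsigned short)quantMax :
        (unsigned short)std::lround(zeroPointFromMin);
    *nudgedMax = (quantMaxF - nudgedZeroPoint) * (*scale);
    *nudgedMin = (quantMinF - nudgedZeroPoint) * (*scale);
}

int main() {
    float scale, lo, hi;
    nudge(-0.9f, 1.0f, 0, 255, &scale, &lo, &hi); // 8 bits, not narrowed
    printf("scale=%f nudgedMin=%f nudgedMax=%f\n", scale, lo, hi);
    return 0;
}

With min=-0.9, max=1.0 and 8 un-narrowed bits this prints roughly scale=0.007451, nudgedMin=-0.901569, nudgedMax=0.998431: the range is nudged so that 0.0 maps exactly to the integer zero point 121.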
5782d7999030d1b71b54617cc7b570c7bfeeab88.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/transpose.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/reshape.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/transpose.hpp> #include <cudf/utilities/nvtx_utils.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> namespace cudf { namespace detail { std::pair<std::unique_ptr<column>, table_view> transpose(table_view const& input, rmm::mr::device_memory_resource* mr, hipStream_t stream) { // If there are no rows in the input, return successfully if (input.num_columns() == 0 || input.num_rows() == 0) { return std::make_pair(std::make_unique<column>(), table_view{}); } // Check datatype homogeneity auto const dtype = input.column(0).type(); CUDF_EXPECTS( std::all_of( input.begin(), input.end(), [dtype](auto const& col) { return dtype == col.type(); }), "Column type mismatch"); nvtx::range_push("CUDF_TRANSPOSE", nvtx::color::GREEN); auto output_column = cudf::interleave_columns(input, mr); auto one_iter = thrust::make_counting_iterator<size_type>(1); auto splits_iter = thrust::make_transform_iterator( one_iter, [width = input.num_columns()](size_type idx) { return idx * width; }); auto splits = std::vector<size_type>(splits_iter, splits_iter + input.num_rows() - 1); auto output_column_views = cudf::split(output_column->view(), splits); return std::make_pair(std::move(output_column), table_view(output_column_views)); } } // namespace detail std::pair<std::unique_ptr<column>, table_view> transpose(table_view const& input, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::transpose(input, mr); } } // namespace cudf
5782d7999030d1b71b54617cc7b570c7bfeeab88.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/transpose.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/reshape.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/transpose.hpp> #include <cudf/utilities/nvtx_utils.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> namespace cudf { namespace detail { std::pair<std::unique_ptr<column>, table_view> transpose(table_view const& input, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { // If there are no rows in the input, return successfully if (input.num_columns() == 0 || input.num_rows() == 0) { return std::make_pair(std::make_unique<column>(), table_view{}); } // Check datatype homogeneity auto const dtype = input.column(0).type(); CUDF_EXPECTS( std::all_of( input.begin(), input.end(), [dtype](auto const& col) { return dtype == col.type(); }), "Column type mismatch"); nvtx::range_push("CUDF_TRANSPOSE", nvtx::color::GREEN); auto output_column = cudf::interleave_columns(input, mr); auto one_iter = thrust::make_counting_iterator<size_type>(1); auto splits_iter = thrust::make_transform_iterator( one_iter, [width = input.num_columns()](size_type idx) { return idx * width; }); auto splits = std::vector<size_type>(splits_iter, splits_iter + input.num_rows() - 1); auto output_column_views = cudf::split(output_column->view(), splits); return std::make_pair(std::move(output_column), table_view(output_column_views)); } } // namespace detail std::pair<std::unique_ptr<column>, table_view> transpose(table_view const& input, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::transpose(input, mr); } } // namespace cudf
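The transpose above launches no dedicated kernel: cudf::interleave_columns lays the rows out contiguously, and cudf::split then cuts that buffer every num_columns elements, yielding one output column per input row. A toy host-side illustration of the same index arithmetic (illustrative code, not cudf):

#include <cstdio>
#include <vector>

int main() {
    int nrows = 3, ncols = 2;
    std::vector<int> col0 = {1, 2, 3}, col1 = {4, 5, 6}; // a 3x2 input table
    std::vector<int> interleaved;
    for (int r = 0; r < nrows; ++r) {       // equivalent of interleave_columns
        interleaved.push_back(col0[r]);
        interleaved.push_back(col1[r]);
    }
    // split points at ncols, 2*ncols, ... (num_rows - 1 splits in total)
    for (int r = 0; r < nrows; ++r) {
        printf("output column %d:", r);     // holds input row r
        for (int c = 0; c < ncols; ++c) printf(" %d", interleaved[r * ncols + c]);
        printf("\n");
    }
    return 0;
}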
cfc88b6726d8703389deaf1d47de74dd4293d8c8.hip
// !!! This is a file automatically generated by hipify!!! #ifdef TARGET_GPU void GPU_ENTRY(init, SIMENGINE_STORAGE){ // FIXME Add more checking of capabilities and devices available! hipSetDevice(cutGetMaxGflopsDeviceId()); } void GPU_ENTRY(exit, SIMENGINE_STORAGE){ hipDeviceReset(); } // Takes a solver_props pointer on the CPU and returns a pointer to a mirrored structure on the GPU solver_props *GPU_ENTRY(init_props, SIMENGINE_STORAGE, solver_props *props){ // Local temp solver_props tprops; // GPU datastructures solver_props *dprops; // Copy the properties to local temporary memcpy(&tprops, props, sizeof(solver_props)); // Allocate GPU space for props and all pointer fields of props cutilSafeCall(hipMalloc((void**)&dprops, sizeof(solver_props))); cutilSafeCall(hipMalloc((void**)&tprops.time, props->num_models*sizeof(CDATAFORMAT))); if(props->statesize){ cutilSafeCall(hipMalloc((void**)&tprops.model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT))); } else{ tprops.model_states = NULL; } if(tprops.inputsize){ cutilSafeCall(hipMalloc((void**)&tprops.inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT))); } else{ tprops.inputs = NULL; } cutilSafeCall(hipMalloc((void**)&tprops.ob, props->ob_size)); cutilSafeCall(hipMemset(tprops.ob, 0, props->ob_size)); if(props->outputsize){ cutilSafeCall(hipMalloc((void**)&tprops.outputs, props->num_models*props->outputsize*sizeof(CDATAFORMAT))); } else{ tprops.outputs = NULL; } cutilSafeCall(hipMalloc((void**)&tprops.running, props->num_models*sizeof(CDATAFORMAT))); // Copy props to GPU cutilSafeCall(hipMemcpy(dprops, &tprops, sizeof(solver_props), hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(tprops.time, props->time, props->num_models*sizeof(CDATAFORMAT), hipMemcpyHostToDevice)); if(tprops.model_states) cutilSafeCall(hipMemcpy(tprops.model_states, props->model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT), hipMemcpyHostToDevice)); if(tprops.inputs) cutilSafeCall(hipMemcpy(tprops.inputs, props->inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT), hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(tprops.running, props->running, props->num_models*sizeof(CDATAFORMAT), hipMemcpyHostToDevice)); // Store pointers to GPU memory for data we need to be able to retrieve props->gpu.ob = tprops.ob; props->gpu.time = tprops.time; props->gpu.model_states = tprops.model_states; return dprops; } // Frees a GPU solver props structure void GPU_ENTRY(free_props, SIMENGINE_STORAGE, solver_props *props){ solver_props tprops; cutilSafeCall(hipMemcpy(&tprops, props, sizeof(solver_props), hipMemcpyDeviceToHost)); cutilSafeCall(hipFree(tprops.time)); if(tprops.model_states) cutilSafeCall(hipFree(tprops.model_states)); if(tprops.inputs) cutilSafeCall(hipFree(tprops.inputs)); cutilSafeCall(hipFree(tprops.ob)); if(tprops.outputs) cutilSafeCall(hipFree(tprops.outputs)); cutilSafeCall(hipFree(tprops.running)); cutilSafeCall(hipFree(props)); } #endif // #ifdef TARGET_GPU
cfc88b6726d8703389deaf1d47de74dd4293d8c8.cu
#ifdef TARGET_GPU void GPU_ENTRY(init, SIMENGINE_STORAGE){ // FIXME Add more checking of capabilities and devices available! cudaSetDevice(cutGetMaxGflopsDeviceId()); } void GPU_ENTRY(exit, SIMENGINE_STORAGE){ cudaThreadExit(); } // Takes a solver_props pointer on the CPU and returns a pointer to a mirrored structure on the GPU solver_props *GPU_ENTRY(init_props, SIMENGINE_STORAGE, solver_props *props){ // Local temp solver_props tprops; // GPU datastructures solver_props *dprops; // Copy the properties to local temporary memcpy(&tprops, props, sizeof(solver_props)); // Allocate GPU space for props and all pointer fields of props cutilSafeCall(cudaMalloc((void**)&dprops, sizeof(solver_props))); cutilSafeCall(cudaMalloc((void**)&tprops.time, props->num_models*sizeof(CDATAFORMAT))); if(props->statesize){ cutilSafeCall(cudaMalloc((void**)&tprops.model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT))); } else{ tprops.model_states = NULL; } if(tprops.inputsize){ cutilSafeCall(cudaMalloc((void**)&tprops.inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT))); } else{ tprops.inputs = NULL; } cutilSafeCall(cudaMalloc((void**)&tprops.ob, props->ob_size)); cutilSafeCall(cudaMemset(tprops.ob, 0, props->ob_size)); if(props->outputsize){ cutilSafeCall(cudaMalloc((void**)&tprops.outputs, props->num_models*props->outputsize*sizeof(CDATAFORMAT))); } else{ tprops.outputs = NULL; } cutilSafeCall(cudaMalloc((void**)&tprops.running, props->num_models*sizeof(CDATAFORMAT))); // Copy props to GPU cutilSafeCall(cudaMemcpy(dprops, &tprops, sizeof(solver_props), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(tprops.time, props->time, props->num_models*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice)); if(tprops.model_states) cutilSafeCall(cudaMemcpy(tprops.model_states, props->model_states, props->num_models*props->statesize*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice)); if(tprops.inputs) cutilSafeCall(cudaMemcpy(tprops.inputs, props->inputs, props->num_models*props->inputsize*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(tprops.running, props->running, props->num_models*sizeof(CDATAFORMAT), cudaMemcpyHostToDevice)); // Store pointers to GPU memory for data we need to be able to retrieve props->gpu.ob = tprops.ob; props->gpu.time = tprops.time; props->gpu.model_states = tprops.model_states; return dprops; } // Frees a GPU solver props structure void GPU_ENTRY(free_props, SIMENGINE_STORAGE, solver_props *props){ solver_props tprops; cutilSafeCall(cudaMemcpy(&tprops, props, sizeof(solver_props), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaFree(tprops.time)); if(tprops.model_states) cutilSafeCall(cudaFree(tprops.model_states)); if(tprops.inputs) cutilSafeCall(cudaFree(tprops.inputs)); cutilSafeCall(cudaFree(tprops.ob)); if(tprops.outputs) cutilSafeCall(cudaFree(tprops.outputs)); cutilSafeCall(cudaFree(tprops.running)); cutilSafeCall(cudaFree(props)); } #endif // #ifdef TARGET_GPU
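init_props/free_props above follow a common CUDA pattern for structs that kernels must dereference: make a host-side copy of the struct, repoint its members at freshly allocated device buffers, then copy the patched struct itself into device memory. A minimal sketch of that pattern with an invented two-field struct (names are mine, error handling omitted):

#include <cuda_runtime.h>

// Invented stand-in for solver_props; the real struct has many more members.
struct props_t { double *time; int num_models; };

props_t *mirror_to_gpu(const props_t *host_props) {
    props_t tmp = *host_props;               // host copy, still holding host pointers
    int n = host_props->num_models;
    cudaMalloc((void **)&tmp.time, n * sizeof(double));
    cudaMemcpy(tmp.time, host_props->time, n * sizeof(double), cudaMemcpyHostToDevice);
    props_t *dev_props;
    cudaMalloc((void **)&dev_props, sizeof(props_t));
    // The struct copied here carries device pointers, so kernels can use dev_props->time directly.
    cudaMemcpy(dev_props, &tmp, sizeof(props_t), cudaMemcpyHostToDevice);
    return dev_props;
}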
8fe6f6c3cb4c059d25c33f04987429a1f113ab00.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#ifndef vector
#define vector 1
#endif

#if (vector==1)
#define intvector int
#elif (vector == 2)
#define intvector int2
#elif (vector == 4)
#define intvector int4
#endif

#if use_shuffle == 1
#define stop_loop 16
#elif use_shuffle == 0
#define stop_loop 0
#endif

#define MAX_LOC(old_v, new_v, old_idx, new_idx) \
    if (new_v > old_v) { \
        old_v = new_v; \
        old_idx = new_idx; \
    } \

#define SET_LOC(loc, i) \

__global__ void max_loc(int *max_idx, int *max_val, intvector *location, intvector *array, int use_index, int n) {
    int ti = threadIdx.x;
    int x = blockIdx.x * block_size_x + threadIdx.x;
    int step_size = num_blocks * block_size_x;
    int lmax = 0;
    int lidx = 0;

    //cooperatively iterate over input array with all thread blocks
    for (int i=x; i<n/vector; i+=step_size) {
        intvector v = array[i];
        intvector loc;
        if (use_index == 0) {
            loc = location[i];
        } else {
            #if vector == 1
            loc = i;
            #elif vector == 2
            loc.x = i*vector;
            loc.y = i*vector+1;
            #elif vector == 4
            loc.x = i*vector;
            loc.y = i*vector+1;
            loc.z = i*vector+2;
            loc.w = i*vector+3;
            #endif
        }
        #if vector == 1
        MAX_LOC(lmax, v, lidx, loc)
        #elif vector == 2
        MAX_LOC(lmax, v.x, lidx, loc.x)
        MAX_LOC(lmax, v.y, lidx, loc.y)
        #elif vector == 4
        MAX_LOC(lmax, v.x, lidx, loc.x)
        MAX_LOC(lmax, v.y, lidx, loc.y)
        MAX_LOC(lmax, v.z, lidx, loc.z)
        MAX_LOC(lmax, v.w, lidx, loc.w)
        #endif
    }

    //reduce max to single value (or last 32 in case of use_shuffle)
    __shared__ int sh_max[block_size_x];
    __shared__ int sh_idx[block_size_x];
    sh_max[ti] = lmax;
    sh_idx[ti] = lidx;
    __syncthreads();
    #pragma unroll
    for (unsigned int s=block_size_x/2; s>stop_loop; s>>=1) {
        if (ti < s) {
            MAX_LOC(sh_max[ti], sh_max[ti + s], sh_idx[ti], sh_idx[ti+s])
        }
        __syncthreads();
    }

    //reduce last 32 values to single value using warp shuffle instructions
    #if use_shuffle == 1
    if (ti < 32) {
        lmax = sh_max[ti];
        lidx = sh_idx[ti];
        #pragma unroll
        for (unsigned int s=16; s>0; s>>=1) {
            int v = __shfl_down(lmax, s);
            int i = __shfl_down(lidx, s);
            MAX_LOC(lmax, v, lidx, i)
        }
    }
    #else
    if (ti == 0) {
        lmax = sh_max[0];
        lidx = sh_idx[0];
    }
    #endif

    //write back one value per thread block, run the kernel again with one thread block
    if (ti == 0) {
        max_val[blockIdx.x] = lmax;
        max_idx[blockIdx.x] = lidx;
    }
}
8fe6f6c3cb4c059d25c33f04987429a1f113ab00.cu
#include <stdio.h>

#ifndef vector
#define vector 1
#endif

#if (vector==1)
#define intvector int
#elif (vector == 2)
#define intvector int2
#elif (vector == 4)
#define intvector int4
#endif

#if use_shuffle == 1
#define stop_loop 16
#elif use_shuffle == 0
#define stop_loop 0
#endif

#define MAX_LOC(old_v, new_v, old_idx, new_idx) \
    if (new_v > old_v) { \
        old_v = new_v; \
        old_idx = new_idx; \
    } \

#define SET_LOC(loc, i) \

__global__ void max_loc(int *max_idx, int *max_val, intvector *location, intvector *array, int use_index, int n) {
    int ti = threadIdx.x;
    int x = blockIdx.x * block_size_x + threadIdx.x;
    int step_size = num_blocks * block_size_x;
    int lmax = 0;
    int lidx = 0;

    //cooperatively iterate over input array with all thread blocks
    for (int i=x; i<n/vector; i+=step_size) {
        intvector v = array[i];
        intvector loc;
        if (use_index == 0) {
            loc = location[i];
        } else {
            #if vector == 1
            loc = i;
            #elif vector == 2
            loc.x = i*vector;
            loc.y = i*vector+1;
            #elif vector == 4
            loc.x = i*vector;
            loc.y = i*vector+1;
            loc.z = i*vector+2;
            loc.w = i*vector+3;
            #endif
        }
        #if vector == 1
        MAX_LOC(lmax, v, lidx, loc)
        #elif vector == 2
        MAX_LOC(lmax, v.x, lidx, loc.x)
        MAX_LOC(lmax, v.y, lidx, loc.y)
        #elif vector == 4
        MAX_LOC(lmax, v.x, lidx, loc.x)
        MAX_LOC(lmax, v.y, lidx, loc.y)
        MAX_LOC(lmax, v.z, lidx, loc.z)
        MAX_LOC(lmax, v.w, lidx, loc.w)
        #endif
    }

    //reduce max to single value (or last 32 in case of use_shuffle)
    __shared__ int sh_max[block_size_x];
    __shared__ int sh_idx[block_size_x];
    sh_max[ti] = lmax;
    sh_idx[ti] = lidx;
    __syncthreads();
    #pragma unroll
    for (unsigned int s=block_size_x/2; s>stop_loop; s>>=1) {
        if (ti < s) {
            MAX_LOC(sh_max[ti], sh_max[ti + s], sh_idx[ti], sh_idx[ti+s])
        }
        __syncthreads();
    }

    //reduce last 32 values to single value using warp shuffle instructions
    #if use_shuffle == 1
    if (ti < 32) {
        lmax = sh_max[ti];
        lidx = sh_idx[ti];
        #pragma unroll
        for (unsigned int s=16; s>0; s>>=1) {
            int v = __shfl_down(lmax, s);
            int i = __shfl_down(lidx, s);
            MAX_LOC(lmax, v, lidx, i)
        }
    }
    #else
    if (ti == 0) {
        lmax = sh_max[0];
        lidx = sh_idx[0];
    }
    #endif

    //write back one value per thread block, run the kernel again with one thread block
    if (ti == 0) {
        max_val[blockIdx.x] = lmax;
        max_idx[blockIdx.x] = lidx;
    }
}
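Two remarks on max_loc above. First, __shfl_down(var, delta) is the HIP spelling and the pre-CUDA-9 intrinsic; on CUDA 9+ the .cu variant would read __shfl_down_sync(0xffffffff, var, delta). Second, the kernel leaves one candidate per thread block; the closing comment suggests a second one-block launch, but for a modest num_blocks a host-side combine works equally well. A sketch of that combine (function name and copy strategy are my own, not from the files above):

#include <cuda_runtime.h>

void final_max_loc(const int *d_max_val, const int *d_max_idx, int num_blocks,
                   int *out_val, int *out_idx) {
    int *v = new int[num_blocks], *x = new int[num_blocks];
    cudaMemcpy(v, d_max_val, num_blocks * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(x, d_max_idx, num_blocks * sizeof(int), cudaMemcpyDeviceToHost);
    *out_val = v[0]; *out_idx = x[0];
    for (int i = 1; i < num_blocks; ++i)          // same MAX_LOC rule, serially
        if (v[i] > *out_val) { *out_val = v[i]; *out_idx = x[i]; }
    delete[] v; delete[] x;
}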
f5a79224417815bd90fd174a661fda3426bbcd23.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interpret the results.

//Included Files
#include <iostream>
#include <vector>
#include "classes.h"

//Function Prototypes
//  Functions found in this file
void RecDCA(std::vector<Body> bodies, std::vector<Forces> AF, int n,int cut_off);
//  Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
//  Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen);
//  Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]);
//  Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
//  Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
//  Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);

//DCAhelp:
//  Function that prepares the list of bodies for DCA and finds the final state vector
//      state is the state of the system at that timestep
//      bs is a list of bodies used for initialization
//      js is a list of joints
//      n is the number of bodies
//      Y is the array where the final velocities and accelerations are stored
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, std::vector<Body> bodies , std::vector<Forces> AF, double *Zs, float times[], int reps)
{
    //Create the list that will hold all acceleration and force values for all bodies
    Update_Properties(Zs,bodies[0].Zs,n,state,m,l,I);
    //CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values

    //Pass the list of bodies to DCA and return the accelerations
    //and forces of both handles of every body in the list
    hipEvent_t beginEvent;
    hipEvent_t endEvent;
    hipEventCreate( &beginEvent );
    hipEventCreate( &endEvent );
    for(int i= 0; i<reps; i++)
    {
        hipEventRecord( beginEvent, 0 );
        RecDCA(bodies,AF, n, cut_off);
        hipEventRecord( endEvent, 0 );
        hipEventSynchronize( endEvent );
        hipEventElapsedTime( &times[i], beginEvent, endEvent );
    }

    Y[n]=AF[0].af[8*n]; //For a pendulum, the first acceleration value is in A[2][0]
    for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
    {
        Y[i]=AF[0].af[2*4*n+2*j]-AF[0].af[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
    }
    for(int i = 0; i<n; i++) //Loop through the state vector
    {
        Y[i]=state[i+n]; //Save the velocities
    }
    //Free memory
    hipEventDestroy( beginEvent );
    hipEventDestroy( endEvent );
}

//RecDCA:
//  Function used to solve for the velocity and acceleration of the list of bodies at
//  the current timestep. The list of bodies is assembled in halving passes
//  until there is a single body left. At that point the accelerations and forces
//  are found using the boundary conditions of a pendulum. These values are then propagated
//  back down the levels of assembly, finding the new accelerations and forces
//  for the disassembled bodies. This continues until all bodies are disassembled, ultimately
//  returning the forces and accelerations at both handles of every body in the system. These
//  results are interpreted by DCAhelp (above) to obtain the actual generalized accelerations.
//      bodies is the list of bodies
//      n is the number of bodies
//      i is the level of assembly
//      AF is the array in which the accelerations and forces at the handles of the bodies
//      will be stored.
void RecDCA(std::vector<Body> bodies, std::vector<Forces> AF, int n,int cut_off)
{
    int *nums = (int*)malloc(sizeof(int)*bodies.size());
    int x = n;
    int newlen;
    int odd;
    int i =0;
    while(x!=1)
    {
        odd=0;
        nums[i]=x;
        if(x%2==0)
        {
            newlen = (int) (x/2);
        }
        else
        {
            odd =1;
            newlen=(int)((x+1)/2);
        }
        if(i<cut_off)
        {
            cudaAssemble(bodies[i].Zs,bodies[i].Xs, x, bodies[i+1].Zs, bodies[i+1].Xs , odd, newlen); //Assemble the bodies, storing them in the next level
        }
        else
        {
            Assemble(bodies[i].Zs,bodies[i].Xs,bodies[i+1].Zs,bodies[i+1].Xs, newlen,odd, x); //Assemble the bodies, storing them in the next level
        }
        i++;
        x=newlen;
    }
    nums[i]=x;

    //With a single body left, apply the boundary conditions
    solve_BCs(bodies[i].Zs,bodies[i].Xs, AF[i].af); //Solve the boundary conditions and find the accelerations and forces

    //Knowing the accelerations and forces of the new bodies, the new bodies can be disassembled
    //again, finding the accelerations and forces of the old bodies.
    while(i!=0)
    {
        newlen = nums[i];
        x=nums[i-1];
        if(x%2==0)
        {
            odd = 0;
        }
        else
        {
            odd = 1;
        }
        if(i-1<cut_off)
        {
            cudaDisassemble(AF[i].af, bodies[i-1].Zs,bodies[i-1].Xs , bodies[i].Zs,bodies[i].Xs, odd, x, newlen, AF[i-1].af);
        }
        else
        {
            Disassemble(bodies[i].Zs,bodies[i].Xs,bodies[i-1].Zs,bodies[i-1].Xs,AF[i].af, AF[i-1].af, newlen,odd);
        }
        i--;
    }
    free(nums); //release the level bookkeeping array
}
f5a79224417815bd90fd174a661fda3426bbcd23.cu
// cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interpret the results.

//Included Files
#include <iostream>
#include <vector>
#include "classes.h"

//Function Prototypes
//  Functions found in this file
void RecDCA(std::vector<Body> bodies, std::vector<Forces> AF, int n,int cut_off);
//  Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
//  Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen);
//  Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]);
//  Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
//  Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
//  Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);

//DCAhelp:
//  Function that prepares the list of bodies for DCA and finds the final state vector
//      state is the state of the system at that timestep
//      bs is a list of bodies used for initialization
//      js is a list of joints
//      n is the number of bodies
//      Y is the array where the final velocities and accelerations are stored
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, std::vector<Body> bodies , std::vector<Forces> AF, double *Zs, float times[], int reps)
{
    //Create the list that will hold all acceleration and force values for all bodies
    Update_Properties(Zs,bodies[0].Zs,n,state,m,l,I);
    //CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values

    //Pass the list of bodies to DCA and return the accelerations
    //and forces of both handles of every body in the list
    cudaEvent_t beginEvent;
    cudaEvent_t endEvent;
    cudaEventCreate( &beginEvent );
    cudaEventCreate( &endEvent );
    for(int i= 0; i<reps; i++)
    {
        cudaEventRecord( beginEvent, 0 );
        RecDCA(bodies,AF, n, cut_off);
        cudaEventRecord( endEvent, 0 );
        cudaEventSynchronize( endEvent );
        cudaEventElapsedTime( &times[i], beginEvent, endEvent );
    }

    Y[n]=AF[0].af[8*n]; //For a pendulum, the first acceleration value is in A[2][0]
    for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
    {
        Y[i]=AF[0].af[2*4*n+2*j]-AF[0].af[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
    }
    for(int i = 0; i<n; i++) //Loop through the state vector
    {
        Y[i]=state[i+n]; //Save the velocities
    }
    //Free memory
    cudaEventDestroy( beginEvent );
    cudaEventDestroy( endEvent );
}

//RecDCA:
//  Function used to solve for the velocity and acceleration of the list of bodies at
//  the current timestep. The list of bodies is assembled in halving passes
//  until there is a single body left. At that point the accelerations and forces
//  are found using the boundary conditions of a pendulum. These values are then propagated
//  back down the levels of assembly, finding the new accelerations and forces
//  for the disassembled bodies. This continues until all bodies are disassembled, ultimately
//  returning the forces and accelerations at both handles of every body in the system. These
//  results are interpreted by DCAhelp (above) to obtain the actual generalized accelerations.
//      bodies is the list of bodies
//      n is the number of bodies
//      i is the level of assembly
//      AF is the array in which the accelerations and forces at the handles of the bodies
//      will be stored.
void RecDCA(std::vector<Body> bodies, std::vector<Forces> AF, int n,int cut_off)
{
    int *nums = (int*)malloc(sizeof(int)*bodies.size());
    int x = n;
    int newlen;
    int odd;
    int i =0;
    while(x!=1)
    {
        odd=0;
        nums[i]=x;
        if(x%2==0)
        {
            newlen = (int) (x/2);
        }
        else
        {
            odd =1;
            newlen=(int)((x+1)/2);
        }
        if(i<cut_off)
        {
            cudaAssemble(bodies[i].Zs,bodies[i].Xs, x, bodies[i+1].Zs, bodies[i+1].Xs , odd, newlen); //Assemble the bodies, storing them in the next level
        }
        else
        {
            Assemble(bodies[i].Zs,bodies[i].Xs,bodies[i+1].Zs,bodies[i+1].Xs, newlen,odd, x); //Assemble the bodies, storing them in the next level
        }
        i++;
        x=newlen;
    }
    nums[i]=x;

    //With a single body left, apply the boundary conditions
    solve_BCs(bodies[i].Zs,bodies[i].Xs, AF[i].af); //Solve the boundary conditions and find the accelerations and forces

    //Knowing the accelerations and forces of the new bodies, the new bodies can be disassembled
    //again, finding the accelerations and forces of the old bodies.
    while(i!=0)
    {
        newlen = nums[i];
        x=nums[i-1];
        if(x%2==0)
        {
            odd = 0;
        }
        else
        {
            odd = 1;
        }
        if(i-1<cut_off)
        {
            cudaDisassemble(AF[i].af, bodies[i-1].Zs,bodies[i-1].Xs , bodies[i].Zs,bodies[i].Xs, odd, x, newlen, AF[i-1].af);
        }
        else
        {
            Disassemble(bodies[i].Zs,bodies[i].Xs,bodies[i-1].Zs,bodies[i-1].Xs,AF[i].af, AF[i-1].af, newlen,odd);
        }
        i--;
    }
    free(nums); //release the level bookkeeping array
}
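The nums[] array in RecDCA above records how many bodies exist at each assembly level so the disassembly sweep can walk back down. The schedule in isolation, as a standalone sketch (not project code): each pass halves the body count, rounding up when it is odd.

#include <cstdio>

int main() {
    int n = 7; // number of bodies, chosen arbitrarily for illustration
    for (int x = n, level = 0; ; ++level) {
        printf("level %d: %d bodies\n", level, x);
        if (x == 1) break;
        x = (x % 2 == 0) ? x / 2 : (x + 1) / 2; // same halving rule as RecDCA
    }
    return 0; // prints 7, 4, 2, 1 bodies across levels 0..3
}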
067906fc5e04e8014a43d78b6f12187dd3faaf72.hip
// !!! This is a file automatically generated by hipify!!! /* * Inverse Discrete Cosine Transform in Column wise (DCT two) * DCT_II_Column_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_II_Column_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DCT_II_Column_Inverse.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DCTII_Column_Inverse_Kernel(double *A, double *C, int numARows, int numAColumns, int numCRows, int numCColumns) { double CValue = 0.0; const double PI_d = 3.141592653589793238462643383279502884; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ double As[TILE_DIM][TILE_DIM]; __shared__ double Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = cos(((2 * (threadIdx.x + (k*TILE_DIM)) + 1) / (2.0 * numARows))*PI_d*Row)*sqrt(1.0 / (1 + DELTA(1, Row + 1)))*sqrt(2.0 / numARows); } else { As[threadIdx.y][threadIdx.x] = 0.0; } if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; } else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDCTInverseColumnTwo(double * A, double * C, int numARows, int numAColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix //double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //float * hostComputedC; double * deviceA; //double * deviceB; double * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns)); //hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns); gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns)); //thrust::device_ptr< double >dev_ptr_A(deviceA); //thrust::device_ptr< double >dev_ptr_C(deviceC); // Copy memory to the GPU 
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice)); //hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////// dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; }
067906fc5e04e8014a43d78b6f12187dd3faaf72.cu
/*
 * Inverse Discrete Cosine Transform in Column wise (DCT two)
 * DCT_II_Column_Inverse
 * This CUDA code can handle/work with any type of the input mxArrays,
 * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
 * gpuArray output, B=DCT_II_Column_Inverse(A)=mexFunction(A).
 * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
 * Wellcome Trust Centre for Neuroimaging
 * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
 * Copyright 2018
 * Kevin Bronik
 */

#include "matrix.h"
#include "DCT_II_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

#define TILE_DIM 16
#define DEFAULT_DIM 32  // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi

__global__ void DCTII_Column_Inverse_Kernel(double *A, double *C,
    int numARows, int numAColumns, int numCRows, int numCColumns)
{
    double CValue = 0.0;
    const double PI_d = 3.141592653589793238462643383279502884; //pi
    int Row = blockIdx.y*TILE_DIM + threadIdx.y;
    int Col = blockIdx.x*TILE_DIM + threadIdx.x;
    __shared__ double As[TILE_DIM][TILE_DIM];
    __shared__ double Bs[TILE_DIM][TILE_DIM];

    for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
        // Tile of the inverse DCT-II basis, generated on the fly.
        if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) {
            As[threadIdx.y][threadIdx.x] = cos(((2 * (threadIdx.x + (k*TILE_DIM)) + 1) / (2.0 * numARows))*PI_d*Row)*sqrt(1.0 / (1 + DELTA(1, Row + 1)))*sqrt(2.0 / numARows);
        }
        else {
            As[threadIdx.y][threadIdx.x] = 0.0;
        }
        // Tile of the input matrix.
        if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns) {
            Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col];
        }
        else {
            Bs[threadIdx.y][threadIdx.x] = 0.0;
        }
        __syncthreads();
        for (int n = 0; n < TILE_DIM; ++n) {
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
        }
        __syncthreads();
    }
    if (Row < numCRows && Col < numCColumns) {
        C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
    }
}

// Column-wise inverse DCT-II - Host code.
// The launch grid is rounded up below, so the matrix dimensions need NOT
// be multiples of TILE_DIM.
extern "C" void CalculateTransformDCTInverseColumnTwo(double * A, double * C,
    int numARows, int numAColumns, int numCRows, int numCColumns)
{
    double * hostA = A; // The A matrix
    double * hostC = C; // The output C matrix
    double * deviceA;
    double * deviceC;

    // Setting numCRows and numCColumns
    numCRows = numARows;
    numCColumns = numAColumns;

    // Allocating GPU memory
    gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
    gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));

    // Copy memory to the GPU
    gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice));

    dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
    dim3 dimGrid;
    dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
    dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;

    DCTII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    // Copy the results in GPU memory back to the CPU
    gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));

    gpuErrchk(cudaFree(deviceA));
    gpuErrchk(cudaFree(deviceC));
    return;
}
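The mex gateway above is driven from MATLAB, but the exported routine can also be exercised directly. The following is a minimal host-side harness — a sketch, not part of the original sources; it assumes the object file is linked in and skips the mxGPUArray plumbing — that feeds an identity matrix through the column-wise inverse DCT, which should reproduce the transform matrix itself.

// Sketch: plain C++ driver for CalculateTransformDCTInverseColumnTwo
// (hypothetical test harness, assuming the routine above is linked in).
#include <cstdio>
#include <vector>

extern "C" void CalculateTransformDCTInverseColumnTwo(double *A, double *C,
    int numARows, int numAColumns, int numCRows, int numCColumns);

int main() {
    const int n = 8;
    std::vector<double> A(n * n, 0.0), C(n * n, 0.0);
    for (int i = 0; i < n; ++i) A[i * n + i] = 1.0;   // identity input
    // numCRows/numCColumns are overwritten inside the routine anyway.
    CalculateTransformDCTInverseColumnTwo(A.data(), C.data(), n, n, n, n);
    printf("C[0][0] = %f\n", C[0]);                   // first basis coefficient
    return 0;
}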
6894c7343e53cd529e2314dd53f9979687d0f83b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>

#include "caffe/layers/reorg_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template<typename Dtype>
__global__ void Reorg(const int nthreads, const Dtype *in_data, const bool reverse,
                      const int stride, const int width, const int height,
                      const int channels, const int batch_num, Dtype *out_data) {
    int channels_out = channels / (stride * stride);
    int width_out = width * stride;
    int height_out = height * stride;
    int c_h_w = channels * width * height;
    int w_h = width * height;
    CUDA_KERNEL_LOOP(index, nthreads) {
        int index_batch_num = index / (c_h_w);
        int index_channels = index % (c_h_w) / (w_h);
        int index_height = index % (c_h_w) % (w_h) / width;
        int index_width = index % (c_h_w) % (w_h) % width;
        int c2 = index_channels % channels_out;
        int offset = index_channels / channels_out;
        int w2 = index_width * stride + offset % stride;
        int h2 = index_height * stride + offset / stride;
        // int out_index = ((((batch_num * channels_out + index_batch_num) + c2) * height_out + h2) * width_out) + w2;
        int out_index = ((index_batch_num * channels_out + c2) * height_out + h2) * width_out + w2;
        if (reverse) out_data[out_index] = in_data[index];
        else out_data[index] = in_data[out_index];
    }
}

template<typename Dtype>
void ReorgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                    const vector<Blob<Dtype> *> &top) {
    const Dtype *bottom_data = bottom[0]->gpu_data();
    Dtype *top_data = top[0]->mutable_gpu_data();
    const int nthreads = bottom[0]->count();
    hipLaunchKernelGGL(( Reorg<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, bottom_data, reverse_, stride_, width_, height_,
        channels_, batch_num_, top_data);
}

template <typename Dtype>
void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                     const vector<bool> &propagate_down,
                                     const vector<Blob<Dtype> *> &bottom) {
    if (!propagate_down[0]) {
        return;
    }
    const Dtype *top_diff = top[0]->gpu_diff();
    // The gradient must be written to the bottom blob, not read back out of top.
    Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
    const int nthreads = bottom[0]->count();
    hipLaunchKernelGGL(( Reorg<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, top_diff, !reverse_, stride_, width_, height_,
        channels_, batch_num_, bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer);

}  // namespace caffe
6894c7343e53cd529e2314dd53f9979687d0f83b.cu
#include <algorithm>
#include <vector>

#include "caffe/layers/reorg_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template<typename Dtype>
__global__ void Reorg(const int nthreads, const Dtype *in_data, const bool reverse,
                      const int stride, const int width, const int height,
                      const int channels, const int batch_num, Dtype *out_data) {
    int channels_out = channels / (stride * stride);
    int width_out = width * stride;
    int height_out = height * stride;
    int c_h_w = channels * width * height;
    int w_h = width * height;
    CUDA_KERNEL_LOOP(index, nthreads) {
        int index_batch_num = index / (c_h_w);
        int index_channels = index % (c_h_w) / (w_h);
        int index_height = index % (c_h_w) % (w_h) / width;
        int index_width = index % (c_h_w) % (w_h) % width;
        int c2 = index_channels % channels_out;
        int offset = index_channels / channels_out;
        int w2 = index_width * stride + offset % stride;
        int h2 = index_height * stride + offset / stride;
        // int out_index = ((((batch_num * channels_out + index_batch_num) + c2) * height_out + h2) * width_out) + w2;
        int out_index = ((index_batch_num * channels_out + c2) * height_out + h2) * width_out + w2;
        if (reverse) out_data[out_index] = in_data[index];
        else out_data[index] = in_data[out_index];
    }
}

template<typename Dtype>
void ReorgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                    const vector<Blob<Dtype> *> &top) {
    const Dtype *bottom_data = bottom[0]->gpu_data();
    Dtype *top_data = top[0]->mutable_gpu_data();
    const int nthreads = bottom[0]->count();
    Reorg<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
        nthreads, bottom_data, reverse_, stride_, width_, height_,
        channels_, batch_num_, top_data);
}

template <typename Dtype>
void ReorgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                     const vector<bool> &propagate_down,
                                     const vector<Blob<Dtype> *> &bottom) {
    if (!propagate_down[0]) {
        return;
    }
    const Dtype *top_diff = top[0]->gpu_diff();
    // The gradient must be written to the bottom blob, not read back out of top.
    Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
    const int nthreads = bottom[0]->count();
    Reorg<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
        nthreads, top_diff, !reverse_, stride_, width_, height_,
        channels_, batch_num_, bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(ReorgLayer);

}  // namespace caffe
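The Reorg index arithmetic is easier to audit on the host. Below is a minimal CPU reference of the same mapping — a sketch; the function name reorg_cpu is mine — scattering each element of an NCHW (channels, height, width) tensor into the (channels/stride², height·stride, width·stride) tensor exactly as the reverse branch of the kernel does.

// Sketch: CPU reference for the Reorg mapping above (reverse == true case).
#include <vector>

void reorg_cpu(const std::vector<float>& in, std::vector<float>& out,
               int num, int channels, int height, int width, int stride) {
    // Both tensors hold num*channels*height*width elements in total.
    const int channels_out = channels / (stride * stride);
    const int height_out = height * stride;
    const int width_out  = width * stride;
    for (int n = 0; n < num; ++n)
        for (int c = 0; c < channels; ++c)
            for (int h = 0; h < height; ++h)
                for (int w = 0; w < width; ++w) {
                    const int c2 = c % channels_out;          // target channel
                    const int offset = c / channels_out;      // which sub-pixel
                    const int w2 = w * stride + offset % stride;
                    const int h2 = h * stride + offset / stride;
                    const int in_idx  = ((n * channels + c) * height + h) * width + w;
                    const int out_idx = ((n * channels_out + c2) * height_out + h2) * width_out + w2;
                    out[out_idx] = in[in_idx];
                }
}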
95a96b2524ff3d4d6ad5c64b2eebd09c5c56ff8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cudaS_ssdToOutput_kernels( unsigned int batchSize, unsigned int nbClass, unsigned int nbAnchors, unsigned int channelWidth, unsigned int channelHeight, unsigned int nbProposals, unsigned int* nbValidROIs, unsigned int cls, unsigned int totalParts, unsigned int totalTemplates, unsigned int maxParts, unsigned int maxTemplates, unsigned int cumulParts, unsigned int cumulTemplates, unsigned int nbParts, unsigned int nbTemplates, float xRatio, float yRatio, float xOutputRatio, float yOutputRatio, const float* roi_bbox, const float* roi_anchors, const float* anchors, const float* inputs_parts, const float* inputs_templates, float* outputs) { const int batchPos = blockIdx.z; const int proposal = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x; const int ptIdx = blockIdx.y; const int nbDetectedObject = (int) nbValidROIs[batchPos]; const int nbIdx = 6; if(proposal < nbProposals) { const unsigned int n = proposal + cls*nbProposals + batchPos*nbProposals*nbClass; if(proposal < nbDetectedObject) { if(ptIdx == 0) { outputs[0 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[0 + 5*proposal + batchPos*nbProposals*5]; outputs[1 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[1 + 5*proposal + batchPos*nbProposals*5]; outputs[2 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[2 + 5*proposal + batchPos*nbProposals*5]; outputs[3 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[3 + 5*proposal + batchPos*nbProposals*5]; outputs[4 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[4 + 5*proposal + batchPos*nbProposals*5]; outputs[5 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = (float) cls; } if(ptIdx < nbParts && nbParts > 0) { const unsigned int xa = roi_anchors[0 + 5*proposal + batchPos*nbProposals*5]; const unsigned int ya = roi_anchors[1 + 5*proposal + batchPos*nbProposals*5]; const unsigned int k = roi_anchors[2 + 5*proposal + batchPos*nbProposals*5]; const int yIdx = xa + ya*channelWidth + (k*nbParts*2 + cumulParts + ptIdx*2)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*2*totalParts; const int xIdx = xa + ya*channelWidth + (k*nbParts*2 + cumulParts + ptIdx*2 + 1)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*2*totalParts; const float partY = inputs_parts[yIdx]; const float partX = inputs_parts[xIdx]; const int xa0 = (int)(anchors[cls*nbAnchors*4 + k*4] + xa * xRatio); const int ya0 = (int)(anchors[cls*nbAnchors*4 + k*4 + 1] + ya * yRatio); const int xa1 = (int)(anchors[cls*nbAnchors*4 + k*4 + 2] + xa * xRatio); const int ya1 = (int)(anchors[cls*nbAnchors*4 + k*4 + 3] + ya * yRatio); // Anchors width and height const int wa = xa1 - xa0; const int ha = ya1 - ya0; // Anchor center coordinates (xac, yac) const float xac = xa0 + wa / 2.0; const float yac = ya0 + ha / 2.0; const float predPartY = ((partY) * ha + yac)*yOutputRatio ; const float predPartX = ((partX) * wa + xac)*xOutputRatio ; outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = predPartY; outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = predPartX; } else if(ptIdx < maxParts && maxParts > 0) { outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } ///for(unsigned int t = 0; t < nbTemplates; ++t) if(ptIdx < nbTemplates && nbTemplates > 0) { const unsigned int xa = roi_anchors[0 + 5*proposal + 
batchPos*nbProposals*5]; const unsigned int ya = roi_anchors[1 + 5*proposal + batchPos*nbProposals*5]; const unsigned int k = roi_anchors[2 + 5*proposal + batchPos*nbProposals*5]; const int yIdx = xa + ya*channelWidth + (k*nbTemplates*3 + cumulTemplates + ptIdx*3)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates; const int xIdx = xa + ya*channelWidth + (k*nbTemplates*3 + cumulTemplates + ptIdx*3 + 1)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates; const int zIdx = xa + ya*channelWidth + (k*nbTemplates*3 + cumulTemplates + ptIdx*3 + 2)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates; const float templateY = expf(inputs_templates[yIdx]); const float templateX = expf(inputs_templates[xIdx]); const float templateZ = expf(inputs_templates[zIdx]); outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateY; outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateX; outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateZ; } else if(ptIdx < maxTemplates && maxTemplates > 0) { outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } } else { outputs[0 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[1 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[2 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[3 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[4 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; //for(unsigned int p = 0; p < nbParts; ++p) if(ptIdx < maxParts && maxParts > 0) { outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } //for(unsigned int t = 0;t < nbTemplates; ++t) if(ptIdx < maxTemplates && maxTemplates > 0) { outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } } } }
95a96b2524ff3d4d6ad5c64b2eebd09c5c56ff8e.cu
#include "includes.h" __global__ void cudaS_ssdToOutput_kernels( unsigned int batchSize, unsigned int nbClass, unsigned int nbAnchors, unsigned int channelWidth, unsigned int channelHeight, unsigned int nbProposals, unsigned int* nbValidROIs, unsigned int cls, unsigned int totalParts, unsigned int totalTemplates, unsigned int maxParts, unsigned int maxTemplates, unsigned int cumulParts, unsigned int cumulTemplates, unsigned int nbParts, unsigned int nbTemplates, float xRatio, float yRatio, float xOutputRatio, float yOutputRatio, const float* roi_bbox, const float* roi_anchors, const float* anchors, const float* inputs_parts, const float* inputs_templates, float* outputs) { const int batchPos = blockIdx.z; const int proposal = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x; const int ptIdx = blockIdx.y; const int nbDetectedObject = (int) nbValidROIs[batchPos]; const int nbIdx = 6; if(proposal < nbProposals) { const unsigned int n = proposal + cls*nbProposals + batchPos*nbProposals*nbClass; if(proposal < nbDetectedObject) { if(ptIdx == 0) { outputs[0 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[0 + 5*proposal + batchPos*nbProposals*5]; outputs[1 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[1 + 5*proposal + batchPos*nbProposals*5]; outputs[2 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[2 + 5*proposal + batchPos*nbProposals*5]; outputs[3 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[3 + 5*proposal + batchPos*nbProposals*5]; outputs[4 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[4 + 5*proposal + batchPos*nbProposals*5]; outputs[5 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = (float) cls; } if(ptIdx < nbParts && nbParts > 0) { const unsigned int xa = roi_anchors[0 + 5*proposal + batchPos*nbProposals*5]; const unsigned int ya = roi_anchors[1 + 5*proposal + batchPos*nbProposals*5]; const unsigned int k = roi_anchors[2 + 5*proposal + batchPos*nbProposals*5]; const int yIdx = xa + ya*channelWidth + (k*nbParts*2 + cumulParts + ptIdx*2)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*2*totalParts; const int xIdx = xa + ya*channelWidth + (k*nbParts*2 + cumulParts + ptIdx*2 + 1)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*2*totalParts; const float partY = inputs_parts[yIdx]; const float partX = inputs_parts[xIdx]; const int xa0 = (int)(anchors[cls*nbAnchors*4 + k*4] + xa * xRatio); const int ya0 = (int)(anchors[cls*nbAnchors*4 + k*4 + 1] + ya * yRatio); const int xa1 = (int)(anchors[cls*nbAnchors*4 + k*4 + 2] + xa * xRatio); const int ya1 = (int)(anchors[cls*nbAnchors*4 + k*4 + 3] + ya * yRatio); // Anchors width and height const int wa = xa1 - xa0; const int ha = ya1 - ya0; // Anchor center coordinates (xac, yac) const float xac = xa0 + wa / 2.0; const float yac = ya0 + ha / 2.0; const float predPartY = ((partY) * ha + yac)*yOutputRatio ; const float predPartX = ((partX) * wa + xac)*xOutputRatio ; outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = predPartY; outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = predPartX; } else if(ptIdx < maxParts && maxParts > 0) { outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } ///for(unsigned int t = 0; t < nbTemplates; ++t) if(ptIdx < nbTemplates && nbTemplates > 0) { const unsigned int xa = roi_anchors[0 + 5*proposal + batchPos*nbProposals*5]; const unsigned int ya = roi_anchors[1 + 5*proposal + 
batchPos*nbProposals*5]; const unsigned int k = roi_anchors[2 + 5*proposal + batchPos*nbProposals*5]; const int yIdx = xa + ya*channelWidth + (k*nbTemplates*3 + cumulTemplates + ptIdx*3)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates; const int xIdx = xa + ya*channelWidth + (k*nbTemplates*3 + cumulTemplates + ptIdx*3 + 1)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates; const int zIdx = xa + ya*channelWidth + (k*nbTemplates*3 + cumulTemplates + ptIdx*3 + 2)*channelHeight*channelWidth + batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates; const float templateY = expf(inputs_templates[yIdx]); const float templateX = expf(inputs_templates[xIdx]); const float templateZ = expf(inputs_templates[zIdx]); outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateY; outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateX; outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateZ; } else if(ptIdx < maxTemplates && maxTemplates > 0) { outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } } else { outputs[0 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[1 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[2 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[3 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; outputs[4 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0; //for(unsigned int p = 0; p < nbParts; ++p) if(ptIdx < maxParts && maxParts > 0) { outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } //for(unsigned int t = 0;t < nbTemplates; ++t) if(ptIdx < maxTemplates && maxTemplates > 0) { outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0; } } } }
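cudaS_ssdToOutput_kernels packs one fixed-size record per proposal into outputs, indexed by n = proposal + cls*nbProposals + batchPos*nbProposals*nbClass. A sketch of the implied record layout follows; the helper names are illustrative, only the offsets and strides come from the kernel itself.

// Sketch: flat per-proposal record written by cudaS_ssdToOutput_kernels.
//   [0 .. 4]                                the 5 roi_bbox values (box + score, per the 5-float stride)
//   [5]                                     class id, stored as float
//   [6 .. 6 + maxParts*2)                   predicted part (y, x) pairs, zero-padded
//   [6 + maxParts*2 .. + maxTemplates*3)    exp()-decoded template (y, x, z) triples, zero-padded
inline int ssdRecordStride(int maxParts, int maxTemplates) {
    const int nbIdx = 6;  // bbox fields + class id, matching the kernel constant
    return nbIdx + maxParts * 2 + maxTemplates * 3;
}

inline int ssdRecordOffset(int proposal, int cls, int batchPos,
                           int nbProposals, int nbClass,
                           int maxParts, int maxTemplates) {
    const int n = proposal + cls * nbProposals + batchPos * nbProposals * nbClass;
    return n * ssdRecordStride(maxParts, maxTemplates);
}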
6d263ece25a66de60839f71cc81d75e98fbf6cf2.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <vector> #include "hip/hip_runtime.h" #include "caffe/layer.hpp" #include"caffe/layers/spatial_transformer_layer.hpp" namespace caffe { // compute each Vi template <typename Dtype> __global__ void ComputeSource(const int total, const int num, const int output_h, const int output_w,const int input_h, const int input_w, const Dtype* target_data, const Dtype* theta, Dtype* source_data, int* source_range_data) { // total = num * output_h * output_w CUDA_KERNEL_LOOP(index, total) { int div = output_h * output_w; int n = index / div; int n_rem = index % div; div /= output_h; int h = n_rem / div; int w = n_rem % div; Dtype x_target = target_data[h * output_w + w]; Dtype y_target = target_data[h * output_w + w + output_w * output_h]; int offset_theta = 6 * n; Dtype x = x_target * theta[offset_theta] + y_target * theta[offset_theta + 1] + theta[offset_theta + 2]; Dtype y = x_target * theta[offset_theta + 3] + y_target * theta[offset_theta + 4] + theta[offset_theta + 5]; x = (x + (Dtype) 1.) / (Dtype) 2. * (input_w - 1); y = (y + (Dtype) 1.) / (Dtype) 2. * (input_h - 1); int offset_source = n * output_h * output_w * 2 + h * output_w + w; source_data[offset_source] = x; source_data[offset_source + output_h * output_w] = y; int w_min = (floor(x) > 0) ? floor(x) : 0; int w_max = (ceil(x) < input_w - 1) ? ceil(x) : (input_w - 1); int h_min = (floor(y) > 0) ? floor(y) : 0; int h_max = (ceil(y) < input_h - 1) ? ceil(y) : (input_h - 1); int offset_range = (n * output_h * output_w + h * output_w + w) * 4; source_range_data[offset_range] = w_min; source_range_data[offset_range + 1] = w_max; source_range_data[offset_range + 2] = h_min; source_range_data[offset_range + 3] = h_max; } } template <typename Dtype> __global__ void AffineForward(const int count, const int channels, const int height, const int width,const int input_h,const int input_w, const Dtype* in, const Dtype* source_data, const int* source_range_data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { int div = channels * height * width; int n = index / div; int n_rem = index % div; div /= channels; int c = n_rem / div; int c_rem = n_rem % div; div /= height; int h = c_rem / div; int w = c_rem % div; int offset_source = n * 2 * height * width + h * width + w; Dtype x = source_data[offset_source]; Dtype y = source_data[offset_source + height * width]; int offset_range = (n * height * width + h * width + w) * 4; int w_min = source_range_data[offset_range]; int w_max = source_range_data[offset_range + 1]; int h_min = source_range_data[offset_range + 2]; int h_max = source_range_data[offset_range + 3]; int offset_input = n * channels * input_h * input_w + c * input_h * input_w; int offset_output = n * channels * height * width + c * height * width; Dtype tmp = 0; for (int hh = h_min; hh <= h_max; ++hh) { for (int ww = w_min; ww <= w_max; ++ww) { tmp += in[offset_input + hh * input_w + ww]*(1 - fabs(x - ww)) * (1 - fabs(y - hh));//in bottom } } out[offset_output + h * width + w] = tmp; } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* theta_data = bottom[1]->mutable_gpu_data(); const Dtype* target_data = target_coordinates_.gpu_data(); Dtype* source_data = source_coordinates_.mutable_gpu_data(); int* range_data = source_sample_range_.mutable_gpu_data(); int count = 
top[0]->count(); caffe_gpu_set<Dtype>(count, 0, top_data); for(int n=0;n<num_;n++){ for(int j=0; j<6;j++){ if(is_pre_defined_theta[j]){ caffe_gpu_set<Dtype>(1, pre_defined_theta[j], theta_data+n*6+j); //theta_data[n*6 + j] = pre_defined_theta[j]; } } } ComputeSource<Dtype> << <CAFFE_GET_BLOCKS(num_ * output_h_ * output_w_), CAFFE_CUDA_NUM_THREADS >> >(num_ * output_h_ * output_w_, num_, output_h_, output_w_,input_h_, input_w_, target_data, theta_data, source_data, range_data); AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, channel_, output_h_, output_w_,input_h_, input_w_, bottom_data, source_data, range_data, top_data); CUDA_POST_KERNEL_CHECK; } __device__ inline void atomic_add(float * address, float val) { atomicAdd(address, val); } __device__ inline void atomic_add(double * address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } // compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta} // compute sum_{i} d{V_i} / d{U_nm} template <typename Dtype> __global__ void AffineBackward(const int count, const int num, const int channels, const int height, const int width, const int input_h, const int input_w, const Dtype* data, const Dtype* source_data, int* source_range_data, const Dtype* top_diff, Dtype* data_diff, Dtype* source_grad_cache) { // count = num * channel * height * width CUDA_KERNEL_LOOP(index, count) { int div = channels * height * width; int n = index / div; int n_rem = index % div; div /= channels; int c = n_rem / div; int c_rem = n_rem % div; div /= height; int h = c_rem / div; int w = c_rem % div; int offset_source = n * 2 * height * width + h * width + w; Dtype x = source_data[offset_source]; Dtype y = source_data[offset_source + height * width]; int offset_range = (n * height * width + h * width + w) * 4; int w_min = source_range_data[offset_range]; int w_max = source_range_data[offset_range + 1]; int h_min = source_range_data[offset_range + 2]; int h_max = source_range_data[offset_range + 3]; int source_diff_x = c * num * 2 * height * width + n * 2 * height * width + h * width + w; int source_diff_y = source_diff_x + height * width; Dtype tmp_source_x = 0; Dtype tmp_source_y = 0; Dtype buffer = top_diff[n * channels * height * width + c * height * width + h * width + w]; for (int hh = h_min; hh <= h_max; ++hh) { for (int ww = w_min; ww <= w_max; ++ww) { int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0)); int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0)); Dtype buffer2 = buffer * data[n * channels * input_h * input_w + c * input_h * input_w + hh * input_w + ww]; //bottom len-1??? 
Dtype tmp_hh = 1 - fabs(y - hh); Dtype tmp_ww = 1 - fabs(x - ww); tmp_source_x += buffer2 * tmp_hh * sign_x; tmp_source_y += buffer2 * tmp_ww * sign_y; Dtype inc = buffer * tmp_hh * tmp_ww; int offset = n * channels * input_h * input_w + c * input_h * input_w + hh * input_w + ww;//bottom diff atomic_add(data_diff + offset, inc); } } source_grad_cache[source_diff_x] = tmp_source_x * (width - 1) / (Dtype) 2.; source_grad_cache[source_diff_y] = tmp_source_y * (height - 1) / (Dtype) 2.; } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* data_diff = bottom[0]->mutable_gpu_diff(); Dtype* theta_diff = bottom[1]->mutable_gpu_diff(); Dtype* source_grad_cache = source_grad_cache_.mutable_gpu_data(); const Dtype* target_data = target_coordinates_.gpu_data(); const Dtype* source_data = source_coordinates_.gpu_data(); Dtype* source_diff = source_coordinates_.mutable_gpu_diff(); int* source_range_data = source_sample_range_.mutable_gpu_data(); caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff); int count = top[0]->count(); // compute gradient with respect to theta AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, num_, channel_, output_h_, output_w_,input_h_,input_w_, bottom_data, source_data, source_range_data, top_diff, data_diff, source_grad_cache); // merge gradient for theta caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, num_ * 2 * map_size_, channel_, Dtype(1), source_grad_op_.gpu_data(), source_grad_cache, Dtype(0), source_diff); for (int index = 0; index < num_; ++index) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, map_size_, Dtype(1), source_diff + index * 2 * map_size_, target_data, Dtype(0), theta_diff + index * 6); //tricky way for not changing fixed theta for(int j=0 ;j<6;j++){ if(is_pre_defined_theta[j]){ //theta_diff[index*6 + j] = 0; caffe_gpu_set<Dtype>(1,0, theta_diff+index*6+j); } } } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
6d263ece25a66de60839f71cc81d75e98fbf6cf2.cu
#include <algorithm> #include <vector> #include "cuda.h" #include "caffe/layer.hpp" #include"caffe/layers/spatial_transformer_layer.hpp" namespace caffe { // compute each Vi template <typename Dtype> __global__ void ComputeSource(const int total, const int num, const int output_h, const int output_w,const int input_h, const int input_w, const Dtype* target_data, const Dtype* theta, Dtype* source_data, int* source_range_data) { // total = num * output_h * output_w CUDA_KERNEL_LOOP(index, total) { int div = output_h * output_w; int n = index / div; int n_rem = index % div; div /= output_h; int h = n_rem / div; int w = n_rem % div; Dtype x_target = target_data[h * output_w + w]; Dtype y_target = target_data[h * output_w + w + output_w * output_h]; int offset_theta = 6 * n; Dtype x = x_target * theta[offset_theta] + y_target * theta[offset_theta + 1] + theta[offset_theta + 2]; Dtype y = x_target * theta[offset_theta + 3] + y_target * theta[offset_theta + 4] + theta[offset_theta + 5]; x = (x + (Dtype) 1.) / (Dtype) 2. * (input_w - 1); y = (y + (Dtype) 1.) / (Dtype) 2. * (input_h - 1); int offset_source = n * output_h * output_w * 2 + h * output_w + w; source_data[offset_source] = x; source_data[offset_source + output_h * output_w] = y; int w_min = (floor(x) > 0) ? floor(x) : 0; int w_max = (ceil(x) < input_w - 1) ? ceil(x) : (input_w - 1); int h_min = (floor(y) > 0) ? floor(y) : 0; int h_max = (ceil(y) < input_h - 1) ? ceil(y) : (input_h - 1); int offset_range = (n * output_h * output_w + h * output_w + w) * 4; source_range_data[offset_range] = w_min; source_range_data[offset_range + 1] = w_max; source_range_data[offset_range + 2] = h_min; source_range_data[offset_range + 3] = h_max; } } template <typename Dtype> __global__ void AffineForward(const int count, const int channels, const int height, const int width,const int input_h,const int input_w, const Dtype* in, const Dtype* source_data, const int* source_range_data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { int div = channels * height * width; int n = index / div; int n_rem = index % div; div /= channels; int c = n_rem / div; int c_rem = n_rem % div; div /= height; int h = c_rem / div; int w = c_rem % div; int offset_source = n * 2 * height * width + h * width + w; Dtype x = source_data[offset_source]; Dtype y = source_data[offset_source + height * width]; int offset_range = (n * height * width + h * width + w) * 4; int w_min = source_range_data[offset_range]; int w_max = source_range_data[offset_range + 1]; int h_min = source_range_data[offset_range + 2]; int h_max = source_range_data[offset_range + 3]; int offset_input = n * channels * input_h * input_w + c * input_h * input_w; int offset_output = n * channels * height * width + c * height * width; Dtype tmp = 0; for (int hh = h_min; hh <= h_max; ++hh) { for (int ww = w_min; ww <= w_max; ++ww) { tmp += in[offset_input + hh * input_w + ww]*(1 - fabs(x - ww)) * (1 - fabs(y - hh));//in bottom } } out[offset_output + h * width + w] = tmp; } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* theta_data = bottom[1]->mutable_gpu_data(); const Dtype* target_data = target_coordinates_.gpu_data(); Dtype* source_data = source_coordinates_.mutable_gpu_data(); int* range_data = source_sample_range_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set<Dtype>(count, 0, top_data); for(int 
n=0;n<num_;n++){ for(int j=0; j<6;j++){ if(is_pre_defined_theta[j]){ caffe_gpu_set<Dtype>(1, pre_defined_theta[j], theta_data+n*6+j); //theta_data[n*6 + j] = pre_defined_theta[j]; } } } ComputeSource<Dtype> << <CAFFE_GET_BLOCKS(num_ * output_h_ * output_w_), CAFFE_CUDA_NUM_THREADS >> >(num_ * output_h_ * output_w_, num_, output_h_, output_w_,input_h_, input_w_, target_data, theta_data, source_data, range_data); AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, channel_, output_h_, output_w_,input_h_, input_w_, bottom_data, source_data, range_data, top_data); CUDA_POST_KERNEL_CHECK; } __device__ inline void atomic_add(float * address, float val) { atomicAdd(address, val); } __device__ inline void atomic_add(double * address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } // compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta} // compute sum_{i} d{V_i} / d{U_nm} template <typename Dtype> __global__ void AffineBackward(const int count, const int num, const int channels, const int height, const int width, const int input_h, const int input_w, const Dtype* data, const Dtype* source_data, int* source_range_data, const Dtype* top_diff, Dtype* data_diff, Dtype* source_grad_cache) { // count = num * channel * height * width CUDA_KERNEL_LOOP(index, count) { int div = channels * height * width; int n = index / div; int n_rem = index % div; div /= channels; int c = n_rem / div; int c_rem = n_rem % div; div /= height; int h = c_rem / div; int w = c_rem % div; int offset_source = n * 2 * height * width + h * width + w; Dtype x = source_data[offset_source]; Dtype y = source_data[offset_source + height * width]; int offset_range = (n * height * width + h * width + w) * 4; int w_min = source_range_data[offset_range]; int w_max = source_range_data[offset_range + 1]; int h_min = source_range_data[offset_range + 2]; int h_max = source_range_data[offset_range + 3]; int source_diff_x = c * num * 2 * height * width + n * 2 * height * width + h * width + w; int source_diff_y = source_diff_x + height * width; Dtype tmp_source_x = 0; Dtype tmp_source_y = 0; Dtype buffer = top_diff[n * channels * height * width + c * height * width + h * width + w]; for (int hh = h_min; hh <= h_max; ++hh) { for (int ww = w_min; ww <= w_max; ++ww) { int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0)); int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0)); Dtype buffer2 = buffer * data[n * channels * input_h * input_w + c * input_h * input_w + hh * input_w + ww]; //bottom len-1??? 
Dtype tmp_hh = 1 - fabs(y - hh); Dtype tmp_ww = 1 - fabs(x - ww); tmp_source_x += buffer2 * tmp_hh * sign_x; tmp_source_y += buffer2 * tmp_ww * sign_y; Dtype inc = buffer * tmp_hh * tmp_ww; int offset = n * channels * input_h * input_w + c * input_h * input_w + hh * input_w + ww;//bottom diff atomic_add(data_diff + offset, inc); } } source_grad_cache[source_diff_x] = tmp_source_x * (width - 1) / (Dtype) 2.; source_grad_cache[source_diff_y] = tmp_source_y * (height - 1) / (Dtype) 2.; } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* data_diff = bottom[0]->mutable_gpu_diff(); Dtype* theta_diff = bottom[1]->mutable_gpu_diff(); Dtype* source_grad_cache = source_grad_cache_.mutable_gpu_data(); const Dtype* target_data = target_coordinates_.gpu_data(); const Dtype* source_data = source_coordinates_.gpu_data(); Dtype* source_diff = source_coordinates_.mutable_gpu_diff(); int* source_range_data = source_sample_range_.mutable_gpu_data(); caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff); int count = top[0]->count(); // compute gradient with respect to theta AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, num_, channel_, output_h_, output_w_,input_h_,input_w_, bottom_data, source_data, source_range_data, top_diff, data_diff, source_grad_cache); // merge gradient for theta caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, num_ * 2 * map_size_, channel_, Dtype(1), source_grad_op_.gpu_data(), source_grad_cache, Dtype(0), source_diff); for (int index = 0; index < num_; ++index) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 2, 3, map_size_, Dtype(1), source_diff + index * 2 * map_size_, target_data, Dtype(0), theta_diff + index * 6); //tricky way for not changing fixed theta for(int j=0 ;j<6;j++){ if(is_pre_defined_theta[j]){ //theta_diff[index*6 + j] = 0; caffe_gpu_set<Dtype>(1,0, theta_diff+index*6+j); } } } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
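Both versions implement the standard spatial-transformer bilinear sampler: ComputeSource applies the affine map and unnormalizes to pixel coordinates, and AffineForward gathers V = sum over (hh, ww) of U[hh][ww] * (1 - |x - ww|) * (1 - |y - hh|) over the clamped (at most 2x2) integer neighbourhood. A scalar, single-channel sketch of that gather — a hypothetical helper mirroring the kernel's range clamping, not part of the layer:

// Sketch: scalar bilinear gather as done per output pixel in AffineForward.
#include <algorithm>
#include <cmath>

float bilinear_sample(const float* U, int input_h, int input_w, float x, float y) {
    // Same clamped sampling window as ComputeSource stores in source_range_data.
    const int w_min = std::max(static_cast<int>(std::floor(x)), 0);
    const int w_max = std::min(static_cast<int>(std::ceil(x)), input_w - 1);
    const int h_min = std::max(static_cast<int>(std::floor(y)), 0);
    const int h_max = std::min(static_cast<int>(std::ceil(y)), input_h - 1);
    float v = 0.f;
    for (int hh = h_min; hh <= h_max; ++hh)
        for (int ww = w_min; ww <= w_max; ++ww)
            v += U[hh * input_w + ww]
                 * (1.f - std::fabs(x - ww)) * (1.f - std::fabs(y - hh));
    return v;
}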
a16c0630d67972febcf6cb5daa77c92fd42144d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ KEY_TYPE getValue(DATA_TYPE value) { return SORT_KEY; } extern "C" { /** * Sort a list that is short enough to entirely fit in local memory. This is executed as * a single thread block. */ __global__ void sortShortList(DATA_TYPE* __restrict__ data, unsigned int length) { // Load the data into local memory. extern __shared__ DATA_TYPE dataBuffer[]; for (int index = threadIdx.x; index < length; index += blockDim.x) dataBuffer[index] = data[index]; __syncthreads(); // Perform a bitonic sort in local memory. for (unsigned int k = 2; k < 2*length; k *= 2) { for (unsigned int j = k/2; j > 0; j /= 2) { for (unsigned int i = threadIdx.x; i < length; i += blockDim.x) { int ixj = i^j; if (ixj > i && ixj < length) { DATA_TYPE value1 = dataBuffer[i]; DATA_TYPE value2 = dataBuffer[ixj]; bool ascending = ((i&k) == 0); for (unsigned int mask = k*2; mask < 2*length; mask *= 2) ascending = ((i&mask) == 0 ? !ascending : ascending); KEY_TYPE lowKey = (ascending ? getValue(value1) : getValue(value2)); KEY_TYPE highKey = (ascending ? getValue(value2) : getValue(value1)); if (lowKey > highKey) { dataBuffer[i] = value2; dataBuffer[ixj] = value1; } } } __syncthreads(); } } // Write the data back to global memory. for (int index = threadIdx.x; index < length; index += blockDim.x) data[index] = dataBuffer[index]; } /** * Calculate the minimum and maximum value in the array to be sorted. This kernel * is executed as a single work group. */ __global__ void computeRange(const DATA_TYPE* __restrict__ data, unsigned int length, KEY_TYPE* __restrict__ range, unsigned int numBuckets, unsigned int* __restrict__ bucketOffset) { extern __shared__ KEY_TYPE rangeBuffer[]; KEY_TYPE minimum = MAX_KEY; KEY_TYPE maximum = MIN_KEY; // Each thread calculates the range of a subset of values. for (unsigned int index = threadIdx.x; index < length; index += blockDim.x) { KEY_TYPE value = getValue(data[index]); minimum = min(minimum, value); maximum = max(maximum, value); } // Now reduce them. rangeBuffer[threadIdx.x] = minimum; __syncthreads(); for (unsigned int step = 1; step < blockDim.x; step *= 2) { if (threadIdx.x+step < blockDim.x && threadIdx.x%(2*step) == 0) rangeBuffer[threadIdx.x] = min(rangeBuffer[threadIdx.x], rangeBuffer[threadIdx.x+step]); __syncthreads(); } minimum = rangeBuffer[0]; __syncthreads(); rangeBuffer[threadIdx.x] = maximum; __syncthreads(); for (unsigned int step = 1; step < blockDim.x; step *= 2) { if (threadIdx.x+step < blockDim.x && threadIdx.x%(2*step) == 0) rangeBuffer[threadIdx.x] = max(rangeBuffer[threadIdx.x], rangeBuffer[threadIdx.x+step]); __syncthreads(); } maximum = rangeBuffer[0]; if (threadIdx.x == 0) { range[0] = minimum; range[1] = maximum; } // Clear the bucket counters in preparation for the next kernel. for (unsigned int index = threadIdx.x; index < numBuckets; index += blockDim.x) bucketOffset[index] = 0; } /** * Assign elements to buckets. 
*/ __global__ void assignElementsToBuckets(const DATA_TYPE* __restrict__ data, unsigned int length, unsigned int numBuckets, const KEY_TYPE* __restrict__ range, unsigned int* bucketOffset, unsigned int* __restrict__ bucketOfElement, unsigned int* __restrict__ offsetInBucket) { float minValue = (float) (range[0]); float maxValue = (float) (range[1]); float bucketWidth = (maxValue-minValue)/numBuckets; for (unsigned int index = blockDim.x*blockIdx.x+threadIdx.x; index < length; index += blockDim.x*gridDim.x) { float key = (float) getValue(data[index]); unsigned int bucketIndex = min((unsigned int) ((key-minValue)/bucketWidth), numBuckets-1); offsetInBucket[index] = atomicAdd(&bucketOffset[bucketIndex], 1); bucketOfElement[index] = bucketIndex; } } /** * Sum the bucket sizes to compute the start position of each bucket. This kernel * is executed as a single work group. */ __global__ void computeBucketPositions(unsigned int numBuckets, unsigned int* __restrict__ bucketOffset) { extern __shared__ unsigned int posBuffer[]; unsigned int globalOffset = 0; for (unsigned int startBucket = 0; startBucket < numBuckets; startBucket += blockDim.x) { // Load the bucket sizes into local memory. unsigned int globalIndex = startBucket+threadIdx.x; posBuffer[threadIdx.x] = (globalIndex < numBuckets ? bucketOffset[globalIndex] : 0); __syncthreads(); // Perform a parallel prefix sum. for (unsigned int step = 1; step < blockDim.x; step *= 2) { unsigned int add = (threadIdx.x >= step ? posBuffer[threadIdx.x-step] : 0); __syncthreads(); posBuffer[threadIdx.x] += add; __syncthreads(); } // Write the results back to global memory. if (globalIndex < numBuckets) bucketOffset[globalIndex] = posBuffer[threadIdx.x]+globalOffset; globalOffset += posBuffer[blockDim.x-1]; } } /** * Copy the input data into the buckets for sorting. */ __global__ void copyDataToBuckets(const DATA_TYPE* __restrict__ data, DATA_TYPE* __restrict__ buckets, unsigned int length, const unsigned int* __restrict__ bucketOffset, const unsigned int* __restrict__ bucketOfElement, const unsigned int* __restrict__ offsetInBucket) { for (unsigned int index = blockDim.x*blockIdx.x+threadIdx.x; index < length; index += blockDim.x*gridDim.x) { DATA_TYPE element = data[index]; unsigned int bucketIndex = bucketOfElement[index]; unsigned int offset = (bucketIndex == 0 ? 0 : bucketOffset[bucketIndex-1]); buckets[offset+offsetInBucket[index]] = element; } } /** * Sort the data in each bucket. */ __global__ void sortBuckets(DATA_TYPE* __restrict__ data, const DATA_TYPE* __restrict__ buckets, unsigned int numBuckets, const unsigned int* __restrict__ bucketOffset) { extern __shared__ DATA_TYPE dataBuffer[]; for (unsigned int index = blockIdx.x; index < numBuckets; index += gridDim.x) { unsigned int startIndex = (index == 0 ? 0 : bucketOffset[index-1]); unsigned int endIndex = bucketOffset[index]; unsigned int length = endIndex-startIndex; if (length <= blockDim.x) { // Load the data into local memory. if (threadIdx.x < length) dataBuffer[threadIdx.x] = buckets[startIndex+threadIdx.x]; else dataBuffer[threadIdx.x] = MAX_VALUE; __syncthreads(); // Perform a bitonic sort in local memory. for (unsigned int k = 2; k <= blockDim.x; k *= 2) { for (unsigned int j = k/2; j > 0; j /= 2) { int ixj = threadIdx.x^j; if (ixj > threadIdx.x) { DATA_TYPE value1 = dataBuffer[threadIdx.x]; DATA_TYPE value2 = dataBuffer[ixj]; bool ascending = (threadIdx.x&k) == 0; KEY_TYPE lowKey = (ascending ? getValue(value1) : getValue(value2)); KEY_TYPE highKey = (ascending ? 
getValue(value2) : getValue(value1)); if (lowKey > highKey) { dataBuffer[threadIdx.x] = value2; dataBuffer[ixj] = value1; } } __syncthreads(); } } // Write the data to the sorted array. if (threadIdx.x < length) data[startIndex+threadIdx.x] = dataBuffer[threadIdx.x]; } else { // Copy the bucket data over to the output array. for (unsigned int i = threadIdx.x; i < length; i += blockDim.x) data[startIndex+i] = buckets[startIndex+i]; __threadfence_block(); __syncthreads(); // Perform a bitonic sort in global memory. for (unsigned int k = 2; k < 2*length; k *= 2) { for (unsigned int j = k/2; j > 0; j /= 2) { for (unsigned int i = threadIdx.x; i < length; i += blockDim.x) { int ixj = i^j; if (ixj > i && ixj < length) { DATA_TYPE value1 = data[startIndex+i]; DATA_TYPE value2 = data[startIndex+ixj]; bool ascending = ((i&k) == 0); for (unsigned int mask = k*2; mask < 2*length; mask *= 2) ascending = ((i&mask) == 0 ? !ascending : ascending); KEY_TYPE lowKey = (ascending ? getValue(value1) : getValue(value2)); KEY_TYPE highKey = (ascending ? getValue(value2) : getValue(value1)); if (lowKey > highKey) { data[startIndex+i] = value2; data[startIndex+ixj] = value1; } } } __threadfence_block(); __syncthreads(); } } } } } }
a16c0630d67972febcf6cb5daa77c92fd42144d8.cu
__device__ KEY_TYPE getValue(DATA_TYPE value) { return SORT_KEY; } extern "C" { /** * Sort a list that is short enough to entirely fit in local memory. This is executed as * a single thread block. */ __global__ void sortShortList(DATA_TYPE* __restrict__ data, unsigned int length) { // Load the data into local memory. extern __shared__ DATA_TYPE dataBuffer[]; for (int index = threadIdx.x; index < length; index += blockDim.x) dataBuffer[index] = data[index]; __syncthreads(); // Perform a bitonic sort in local memory. for (unsigned int k = 2; k < 2*length; k *= 2) { for (unsigned int j = k/2; j > 0; j /= 2) { for (unsigned int i = threadIdx.x; i < length; i += blockDim.x) { int ixj = i^j; if (ixj > i && ixj < length) { DATA_TYPE value1 = dataBuffer[i]; DATA_TYPE value2 = dataBuffer[ixj]; bool ascending = ((i&k) == 0); for (unsigned int mask = k*2; mask < 2*length; mask *= 2) ascending = ((i&mask) == 0 ? !ascending : ascending); KEY_TYPE lowKey = (ascending ? getValue(value1) : getValue(value2)); KEY_TYPE highKey = (ascending ? getValue(value2) : getValue(value1)); if (lowKey > highKey) { dataBuffer[i] = value2; dataBuffer[ixj] = value1; } } } __syncthreads(); } } // Write the data back to global memory. for (int index = threadIdx.x; index < length; index += blockDim.x) data[index] = dataBuffer[index]; } /** * Calculate the minimum and maximum value in the array to be sorted. This kernel * is executed as a single work group. */ __global__ void computeRange(const DATA_TYPE* __restrict__ data, unsigned int length, KEY_TYPE* __restrict__ range, unsigned int numBuckets, unsigned int* __restrict__ bucketOffset) { extern __shared__ KEY_TYPE rangeBuffer[]; KEY_TYPE minimum = MAX_KEY; KEY_TYPE maximum = MIN_KEY; // Each thread calculates the range of a subset of values. for (unsigned int index = threadIdx.x; index < length; index += blockDim.x) { KEY_TYPE value = getValue(data[index]); minimum = min(minimum, value); maximum = max(maximum, value); } // Now reduce them. rangeBuffer[threadIdx.x] = minimum; __syncthreads(); for (unsigned int step = 1; step < blockDim.x; step *= 2) { if (threadIdx.x+step < blockDim.x && threadIdx.x%(2*step) == 0) rangeBuffer[threadIdx.x] = min(rangeBuffer[threadIdx.x], rangeBuffer[threadIdx.x+step]); __syncthreads(); } minimum = rangeBuffer[0]; __syncthreads(); rangeBuffer[threadIdx.x] = maximum; __syncthreads(); for (unsigned int step = 1; step < blockDim.x; step *= 2) { if (threadIdx.x+step < blockDim.x && threadIdx.x%(2*step) == 0) rangeBuffer[threadIdx.x] = max(rangeBuffer[threadIdx.x], rangeBuffer[threadIdx.x+step]); __syncthreads(); } maximum = rangeBuffer[0]; if (threadIdx.x == 0) { range[0] = minimum; range[1] = maximum; } // Clear the bucket counters in preparation for the next kernel. for (unsigned int index = threadIdx.x; index < numBuckets; index += blockDim.x) bucketOffset[index] = 0; } /** * Assign elements to buckets. 
*/ __global__ void assignElementsToBuckets(const DATA_TYPE* __restrict__ data, unsigned int length, unsigned int numBuckets, const KEY_TYPE* __restrict__ range, unsigned int* bucketOffset, unsigned int* __restrict__ bucketOfElement, unsigned int* __restrict__ offsetInBucket) { float minValue = (float) (range[0]); float maxValue = (float) (range[1]); float bucketWidth = (maxValue-minValue)/numBuckets; for (unsigned int index = blockDim.x*blockIdx.x+threadIdx.x; index < length; index += blockDim.x*gridDim.x) { float key = (float) getValue(data[index]); unsigned int bucketIndex = min((unsigned int) ((key-minValue)/bucketWidth), numBuckets-1); offsetInBucket[index] = atomicAdd(&bucketOffset[bucketIndex], 1); bucketOfElement[index] = bucketIndex; } } /** * Sum the bucket sizes to compute the start position of each bucket. This kernel * is executed as a single work group. */ __global__ void computeBucketPositions(unsigned int numBuckets, unsigned int* __restrict__ bucketOffset) { extern __shared__ unsigned int posBuffer[]; unsigned int globalOffset = 0; for (unsigned int startBucket = 0; startBucket < numBuckets; startBucket += blockDim.x) { // Load the bucket sizes into local memory. unsigned int globalIndex = startBucket+threadIdx.x; posBuffer[threadIdx.x] = (globalIndex < numBuckets ? bucketOffset[globalIndex] : 0); __syncthreads(); // Perform a parallel prefix sum. for (unsigned int step = 1; step < blockDim.x; step *= 2) { unsigned int add = (threadIdx.x >= step ? posBuffer[threadIdx.x-step] : 0); __syncthreads(); posBuffer[threadIdx.x] += add; __syncthreads(); } // Write the results back to global memory. if (globalIndex < numBuckets) bucketOffset[globalIndex] = posBuffer[threadIdx.x]+globalOffset; globalOffset += posBuffer[blockDim.x-1]; } } /** * Copy the input data into the buckets for sorting. */ __global__ void copyDataToBuckets(const DATA_TYPE* __restrict__ data, DATA_TYPE* __restrict__ buckets, unsigned int length, const unsigned int* __restrict__ bucketOffset, const unsigned int* __restrict__ bucketOfElement, const unsigned int* __restrict__ offsetInBucket) { for (unsigned int index = blockDim.x*blockIdx.x+threadIdx.x; index < length; index += blockDim.x*gridDim.x) { DATA_TYPE element = data[index]; unsigned int bucketIndex = bucketOfElement[index]; unsigned int offset = (bucketIndex == 0 ? 0 : bucketOffset[bucketIndex-1]); buckets[offset+offsetInBucket[index]] = element; } } /** * Sort the data in each bucket. */ __global__ void sortBuckets(DATA_TYPE* __restrict__ data, const DATA_TYPE* __restrict__ buckets, unsigned int numBuckets, const unsigned int* __restrict__ bucketOffset) { extern __shared__ DATA_TYPE dataBuffer[]; for (unsigned int index = blockIdx.x; index < numBuckets; index += gridDim.x) { unsigned int startIndex = (index == 0 ? 0 : bucketOffset[index-1]); unsigned int endIndex = bucketOffset[index]; unsigned int length = endIndex-startIndex; if (length <= blockDim.x) { // Load the data into local memory. if (threadIdx.x < length) dataBuffer[threadIdx.x] = buckets[startIndex+threadIdx.x]; else dataBuffer[threadIdx.x] = MAX_VALUE; __syncthreads(); // Perform a bitonic sort in local memory. for (unsigned int k = 2; k <= blockDim.x; k *= 2) { for (unsigned int j = k/2; j > 0; j /= 2) { int ixj = threadIdx.x^j; if (ixj > threadIdx.x) { DATA_TYPE value1 = dataBuffer[threadIdx.x]; DATA_TYPE value2 = dataBuffer[ixj]; bool ascending = (threadIdx.x&k) == 0; KEY_TYPE lowKey = (ascending ? getValue(value1) : getValue(value2)); KEY_TYPE highKey = (ascending ? 
getValue(value2) : getValue(value1)); if (lowKey > highKey) { dataBuffer[threadIdx.x] = value2; dataBuffer[ixj] = value1; } } __syncthreads(); } } // Write the data to the sorted array. if (threadIdx.x < length) data[startIndex+threadIdx.x] = dataBuffer[threadIdx.x]; } else { // Copy the bucket data over to the output array. for (unsigned int i = threadIdx.x; i < length; i += blockDim.x) data[startIndex+i] = buckets[startIndex+i]; __threadfence_block(); __syncthreads(); // Perform a bitonic sort in global memory. for (unsigned int k = 2; k < 2*length; k *= 2) { for (unsigned int j = k/2; j > 0; j /= 2) { for (unsigned int i = threadIdx.x; i < length; i += blockDim.x) { int ixj = i^j; if (ixj > i && ixj < length) { DATA_TYPE value1 = data[startIndex+i]; DATA_TYPE value2 = data[startIndex+ixj]; bool ascending = ((i&k) == 0); for (unsigned int mask = k*2; mask < 2*length; mask *= 2) ascending = ((i&mask) == 0 ? !ascending : ascending); KEY_TYPE lowKey = (ascending ? getValue(value1) : getValue(value2)); KEY_TYPE highKey = (ascending ? getValue(value2) : getValue(value1)); if (lowKey > highKey) { data[startIndex+i] = value2; data[startIndex+ixj] = value1; } } } __threadfence_block(); __syncthreads(); } } } } } }
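All of these sort kernels take their scratch space as dynamically sized shared memory, so the host must pass the byte count as the third launch parameter. A sketch of driving sortShortList — assuming the file above was compiled with -DDATA_TYPE=float -DKEY_TYPE=float -DSORT_KEY=value and suitable MIN_KEY/MAX_KEY/MAX_VALUE definitions, and with relocatable device code (-rdc=true) so the kernel can be declared in a separate translation unit:

// Sketch: host launch for sortShortList (one block, dynamic shared memory).
#include <cuda_runtime.h>
#include <cstdio>

extern "C" __global__ void sortShortList(float* data, unsigned int length);

int main() {
    const unsigned int length = 512;
    float* d_data = nullptr;
    cudaMalloc(&d_data, length * sizeof(float));
    // ... fill d_data with the values to sort ...
    // The dynamic shared allocation must hold all `length` elements.
    sortShortList<<<1, 256, length * sizeof(float)>>>(d_data, length);
    cudaDeviceSynchronize();
    printf("status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_data);
    return 0;
}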
c7840b2ee21a9dce272f2844b75afd3b4d46fc97.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void foo()
{
}

int main()
{
    hipLaunchKernelGGL(( foo), dim3(1), dim3(1), 0, 0, );
    printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
    return 0;
}
c7840b2ee21a9dce272f2844b75afd3b4d46fc97.cu
#include <stdio.h>

__global__ void foo()
{
}

int main()
{
    foo<<<1,1>>>();
    printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    return 0;
}
145bd786dabf261554202fb8cb0894033c21bcad.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 4 #define TC 16 #define C 64 #define N 32 #define H 112 #define W 112 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[1920]; __shared__ float kernel_shared[768]; float pad_temp_shared_local[6]; float kernel_shared_local[12]; compute_local[(0)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 4; ++rc_outer) { for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { __syncthreads(); if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + ((((int)threadIdx.x) * 9) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + ((((int)threadIdx.x) * 9) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1920) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 480) { if (((int)threadIdx.x) < 27) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)))) && (((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + (((((int)threadIdx.x) * 9) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + ((((((int)threadIdx.x) * 9) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + ((((int)threadIdx.x) * 9) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 1) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 1) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1919) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 479) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 1) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 1) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 1) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 2) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 2) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1918) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 478) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 2) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 2) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 2) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 3) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 3) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1917) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 477) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 3) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 3) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 3) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 4) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 4) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1916) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 476) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 4) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 4) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 4) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 5) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 5) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1915) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 475) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 5) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 5) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 5) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 6) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 6) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1914) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 474) { if (((int)threadIdx.x) < 26) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 6) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 6) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 6) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 7) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 7) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1913) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 473) { if (((int)threadIdx.x) < 26) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 7) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 7) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 7) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 8) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 8) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1912) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 472) { if (((int)threadIdx.x) < 26) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 8) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 8) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 8) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((int)threadIdx.x) / 12)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 256) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 768) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 192) { if (((int)threadIdx.x) < 24) { kernel_shared[((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((int)threadIdx.x) / 12) * 576)) + (rc_outer * 144)) + ((((((int)threadIdx.x) % 12) * 4) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 4) % 3)))]; } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 1) / 48)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 1) / 3)) < 256) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 767) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 191) { if (((int)threadIdx.x) < 24) { kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 1) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 1) % 3)))]; } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 2) / 48)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 2) / 3)) < 256) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 766) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 190) { if (((int)threadIdx.x) < 24) { kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 2) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 2) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 2) % 3)))]; } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 3) / 48)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 255) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 765) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 189) { if (((int)threadIdx.x) < 24) { kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 3))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 3) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 3) % 48) / 3) * 9)) + (ry_outer * 3)) + 
((((int)threadIdx.x) * 4) % 3)))]; } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) { pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)))]; pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 60))]; pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 1))]; pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 61))]; pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 2))]; pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 62))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)))]; kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 1))]; kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 2))]; kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 48))]; kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 49))]; kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 50))]; kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 96))]; kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 97))]; kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 98))]; kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 144))]; kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 145))]; kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 146))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(6)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(9)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * 
kernel_shared_local[(7)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(10)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(5)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(8)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(11)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)])); } } } compute[(((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)))] = compute_local[(0)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 224))] = compute_local[(4)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12544))] = compute_local[(1)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12768))] = compute_local[(5)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25088))] = compute_local[(2)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25312))] = compute_local[(6)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37632))] = compute_local[(3)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37856))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, 
/*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); 
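    // The three wrapper classes (ConvGemm, ConvWinogradeNon, ConvFFT) differ only in the
    // cuDNN forward algorithm they request; here the non-fused Winograd variant queries
    // the scratch workspace it needs before allocating it with hipMalloc below.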
cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, 
unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; 
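    // Fully unrolled 3x3 cross-correlation over the padded shared-memory tile:
    // shared row 0 is combined with filter taps data_array[0..2]; the blocks below
    // repeat the pattern for rows 1 and 2 with taps [3..5] and [6..8], accumulating
    // into the TW=4 per-thread outputs temp_result[0..3].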
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; hipMalloc(&device_input,C*H*W*sizeof(float)); 
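    // Benchmark flow: copy the random input to the device once, warm up and time the
    // three cuDNN baselines, then the TVM-generated kernel, then the shared-memory
    // conv2d kernel (time_tdc), and append all timings plus speedups to the CSV below.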
    hipMemcpy(device_input, input, C*H*W*sizeof(float), hipMemcpyHostToDevice);
    float *K = new float[C*N*9];
    for (int i = 0; i < C*N*9; ++i) { K[i] = 1.0f; }
    ConvGemm convGemm; convGemm.initialize();
    ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize();
    ConvFFT convFFT; convFFT.initialize();
    float *out_cudnn;
    float *out_cudnn_host = new float[N*H*W];
    hipEvent_t event_start;
    hipEvent_t event_stop;
    hipEventCreate(&event_start);
    hipEventCreate(&event_stop);
    // Warm-up runs for the three cuDNN baselines.
    out_cudnn = convGemm.forward(device_input);
    hipMemcpy(out_cudnn_host, out_cudnn, N*H*W*sizeof(float), hipMemcpyDeviceToHost);
    out_cudnn = convFFT.forward(device_input);
    out_cudnn = convWinogradeNon.forward(device_input);
    float *device_K;
    float *device_out;
    hipMalloc(&device_out, H*W*N*sizeof(float));
    hipMemset(device_out, 0, H*W*N*sizeof(float));
    hipMalloc(&device_K, C*N*9*sizeof(float));
    hipMemcpy(device_K, K, C*N*9*sizeof(float), hipMemcpyHostToDevice);
    // Timed cuDNN implicit-GEMM forward pass.
    hipEventRecord(event_start);
    convGemm.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnGemmTime;
    hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
    // Timed cuDNN non-fused Winograd forward pass.
    hipEventRecord(event_start);
    convWinogradeNon.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnWinogradeTimeNon;
    hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
    // Timed cuDNN FFT forward pass.
    hipEventRecord(event_start);
    convFFT.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnFFTTime;
    hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
    // Timed TVM-generated kernel with its fixed launch configuration.
    dim3 grid(4, 28, 2);
    dim3 block(28, 2, 4);
    hipEventRecord(event_start);
    hipLaunchKernelGGL(default_function_kernel0, dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float time_tvm;
    hipEventElapsedTime(&time_tvm, event_start, event_stop);
    float *out_tvm = new float[N*H*W];
    hipMemcpy(out_tvm, device_out, N*H*W*sizeof(float), hipMemcpyDeviceToHost);
    hipMemset(device_out, 0, sizeof(float)*N*H*W);
    // hipFuncSetAttribute takes a const void* rather than a templated kernel symbol
    // (unlike cudaFuncSetAttribute), so the kernel pointer needs an explicit cast.
    chkerr(hipFuncSetAttribute(reinterpret_cast<const void*>(conv2d),
                               hipFuncAttributeMaxDynamicSharedMemorySize,
                               TC*(TH+2)*(WPAD)*4));
    // Timed tiled direct-convolution kernel with dynamic shared memory.
    hipEventRecord(event_start);
    hipLaunchKernelGGL(conv2d, dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float time_tdc;
    hipEventElapsedTime(&time_tdc, event_start, event_stop);
    float *out_tdc = new float[N*H*W];
    hipMemcpy(out_tdc, device_out, N*H*W*sizeof(float), hipMemcpyDeviceToHost);
    // Append layer shape, raw timings, and speedups over the direct-conv kernel to the CSV.
    ofstream outfile;
    char buffer[1000];
    int ret = sprintf(buffer, "%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", N, C, H, W,
                      cudnnFFTTime, cudnnWinogradeTimeNon, cudnnGemmTime, time_tvm, time_tdc,
                      cudnnFFTTime/time_tdc, cudnnWinogradeTimeNon/time_tdc, cudnnGemmTime/time_tdc, time_tvm/time_tdc);
    outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
    outfile << buffer;
    // difference is computed as a sanity check of the two kernels but is not reported.
    float difference = check_diff(out_tvm, out_tdc, N*H*W);
    cout << N << "," << C << "," << H << "," << W << "," << cudnnFFTTime << "," << cudnnWinogradeTimeNon << "," << cudnnGemmTime << ","
         << time_tvm << "," << time_tdc << "," << cudnnFFTTime/time_tdc << ","
         << cudnnWinogradeTimeNon/time_tdc << "," << cudnnGemmTime/time_tdc << "," << time_tvm/time_tdc << endl;
    return 0;
}
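// Portability note: unlike the hip* runtime calls above, the cudnn* calls were left
// untranslated, so this HIP file only builds where the cuDNN headers and library are
// available, e.g. hipcc targeting a CUDA backend with -lcudnn and -fopenmp for
// check_diff (these flags are an assumption, not part of the original sources).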
145bd786dabf261554202fb8cb0894033c21bcad.cu
#include <cudnn.h> #include <stdio.h> #include <cuda.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 4 #define TC 16 #define C 64 #define N 32 #define H 112 #define W 112 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(cudaError_t code) { if (code != cudaSuccess) { std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[1920]; __shared__ float kernel_shared[768]; float pad_temp_shared_local[6]; float kernel_shared_local[12]; compute_local[(0)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 4; ++rc_outer) { for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { __syncthreads(); if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + ((((int)threadIdx.x) * 9) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + ((((int)threadIdx.x) * 9) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1920) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 480) { if (((int)threadIdx.x) < 27) { pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)))) && (((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + (((((int)threadIdx.x) * 9) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + ((((((int)threadIdx.x) * 9) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + ((((int)threadIdx.x) * 9) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 1) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 1) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1919) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 479) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 1) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 1) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 1) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 2) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 2) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1918) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 478) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 2) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 2) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 2) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 3) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 3) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1917) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 477) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 3) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 3) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 3) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 4) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 4) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1916) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 476) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 4) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 4) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 4) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 5) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 5) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1915) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 475) { if (((int)threadIdx.x) < 27) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 5) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 5) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 5) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 6) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 6) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1914) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 474) { if (((int)threadIdx.x) < 26) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 6) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 6) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 6) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 7) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 7) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1913) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 473) { if (((int)threadIdx.x) < 26) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 7) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 7) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 7) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 8) / 120)) < 16) { if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 8) / 30)) < 64) { if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1912) { if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 472) { if (((int)threadIdx.x) < 26) { pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)) < 113)) ? 
data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 8) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 8) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 8) % 30)) - 113))] : 0.000000e+00f); } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((int)threadIdx.x) / 12)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 256) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 768) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 192) { if (((int)threadIdx.x) < 24) { kernel_shared[((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((int)threadIdx.x) / 12) * 576)) + (rc_outer * 144)) + ((((((int)threadIdx.x) % 12) * 4) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 4) % 3)))]; } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 1) / 48)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 1) / 3)) < 256) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 767) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 191) { if (((int)threadIdx.x) < 24) { kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 1) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 1) % 3)))]; } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 2) / 48)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 2) / 3)) < 256) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 766) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 190) { if (((int)threadIdx.x) < 24) { kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 2) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 2) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 2) % 3)))]; } } } } } if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 3) / 48)) < 16) { if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 255) { if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 765) { if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 189) { if (((int)threadIdx.x) < 24) { kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 3))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 3) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 3) % 48) / 3) * 9)) + (ry_outer * 3)) + 
((((int)threadIdx.x) * 4) % 3)))]; } } } } } __syncthreads(); for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) { pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)))]; pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 60))]; pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 1))]; pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 61))]; pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 2))]; pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 62))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)))]; kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 1))]; kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 2))]; kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 48))]; kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 49))]; kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 50))]; kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 96))]; kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 97))]; kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 98))]; kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 144))]; kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 145))]; kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 146))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(6)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(9)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * 
kernel_shared_local[(7)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(10)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(5)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(8)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(11)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)])); } } } compute[(((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)))] = compute_local[(0)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 224))] = compute_local[(4)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12544))] = compute_local[(1)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12768))] = compute_local[(5)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25088))] = compute_local[(2)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25312))] = compute_local[(6)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37632))] = compute_local[(3)]; compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37856))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, 
/*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); 
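    // As in the implicit-GEMM variant above, query the scratch workspace for the
    // non-fused Winograd algorithm before allocating it with cudaMalloc below.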
cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int 
warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; 
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; cudaMalloc(&device_input,C*H*W*sizeof(float)); 
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; cudaEvent_t event_start; cudaEvent_t event_stop; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; cudaMalloc(&device_out,H*W*N*sizeof(float)); cudaMemset(device_out,0,H*W*N*sizeof(float)); cudaMalloc(&device_K,C*N*9*sizeof(float)); cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(event_start); convGemm.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnGemmTime; cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop); cudaEventRecord(event_start); convWinogradeNon.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnWinogradeTimeNon; cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); cudaEventRecord(event_start); convFFT.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnFFTTime; cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(4,28,2); dim3 block(28,2,4); cudaEventRecord(event_start); default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tvm; cudaEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); cudaMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); cudaEventRecord(event_start); conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tdc; cudaEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_tvm, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<< cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
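// --- Editor's note: the benchmark driver above times every backend (three cuDNN
// algorithms, the TVM kernel, and conv2d) with the same five-step cudaEvent
// pattern: create, record, launch, synchronize, elapsed. A minimal sketch of that
// pattern factored into a helper; `timeMs` is our name, not part of the original
// harness, and for stable numbers one would also add warm-up launches and averaging.

#include <cuda_runtime.h>

template <typename Launch>
float timeMs(Launch launch) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                  // enqueue start marker
    launch();                                // enqueue the work being measured
    cudaEventRecord(stop);                   // enqueue stop marker
    cudaEventSynchronize(stop);              // block until the work has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}

// Usage, mirroring the benchmark above:
//   float time_tdc = timeMs([&]{
//       conv2d<<<TCS*THS, N*TWS, TC*(TH+2)*WPAD*4>>>(device_input, device_K, device_out);
//   });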
3ddba9bba7cbad28f7b4fb92ac753cd330bea5dc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
c* Multimodal Deformable Image Registration                        *
c* via Mutual Information or Bhattacharyya Distance                *
c* Version: 1.0                                                    *
c* Language: C, CUDA                                               *
c*                                                                 *
c* Developer: Yifei Lou                                            *
c* Email: yifei.lou@ece.gatech.edu                                 *
c*                                                                 *
c* School of Electrical and Computer Engineering                   *
c* Georgia Institute of Technology                                 *
c* Atlanta, GA, 30318                                              *
c* Website: http://groups.bme.gatech.edu/groups/bil/               *
c*                                                                 *
c* Copyright (c) 2011                                              *
c* All rights reserved.                                            *
c*                                                                 *
c* Permission to use, copy, or modify this code and its            *
c* documentation for scientific purpose is hereby granted          *
c* without fee, provided that this copyright notice appear in      *
c* all copies and that both that copyright notice and this         *
c* permission notice appear in supporting documentation. The use   *
c* for commercial purposes is prohibited without permission.       *
c*                                                                 *
c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND          *
c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,     *
c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF        *
c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE        *
c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR            *
c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,    *
c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT*
c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF*
c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED *
c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT     *
c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING  *
c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF  *
c* THE POSSIBILITY OF SUCH DAMAGE.                                 *
c*                                                                 *
c******************************************************************/
/*******************************************************************
c* Short description                                               *
c* Finalize the reconstruction on the current scale and for the    *
c* entire program, output results, release memory spaces for       *
c* global variables, etc.                                          *
c******************************************************************/
#include <iostream>
#include <stdio.h>
#include <cutil.h>
#include <cutil_inline_runtime.h>
#include "viscous_global.h"

void fina()
{
	// map output image to its original scale
	nblocks.x = NBLOCKX;
	nblocks.y = ((1 + (NX0*NY0*NZ0 - 1)/NTHREAD_PER_BLOCK) - 1) / NBLOCKX + 1;
	printf("moving image: max = %f, min = %f\n", max_im_move, min_im_move);
	hipLaunchKernelGGL(( intensityRescale), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_im_move[0], max_im_move, min_im_move, -1);

	// output results
	outputData(d_im_move[0], DATA_SIZE, outputfilename);
	outputData(d_mv_x[0], DATA_SIZE, output_mv_x);
	outputData(d_mv_y[0], DATA_SIZE, output_mv_y);
	outputData(d_mv_z[0], DATA_SIZE, output_mv_z);

	// free up the host and device
	// image pyramid
	for(int scale =0; scale <NSCALE; scale++)
	{
		hipFree(d_im_move[scale]);
		hipFree(d_im_static[scale]);
		hipFree(d_mv_x[scale]);
		hipFree(d_mv_y[scale]);
		hipFree(d_mv_z[scale]);
	}
	// Gaussian kernel
	hipFree(GaussKernelH);
	hipFree(GaussKernelHx);
	// histogram related
	hipFree(d_jointHistogram);
	hipFree(d_jointHistogram_conv);
	hipFree(d_probx);
	hipFree(d_proby);
	hipFree(d_Bsum);
}

void outputData(void *src, int size, const char *outputfilename)
// output data to file; note that `size` is a byte count: it is passed
// unchanged to hipMemcpy and fwrite below
{
	// void *tempData_h = malloc( size );
	// NOTE: since `size` is already in bytes, sizeof(float)*size over-allocates
	// by a factor of four; harmless, but wasteful.
	float *tempData_h = (float*) malloc (sizeof(float)*size);
	if (tempData_h == NULL) { fputs ("Memory error",stderr); exit (2); }
	cutilSafeCall( hipMemcpy( tempData_h, src, size, hipMemcpyDeviceToHost) ); // copy data from GPU to CPU
	FILE *fp;
	fp = fopen(outputfilename,"wb");
	if( fp == NULL ) { std::cout << "Can not open file to write results."; exit(1); }
	fwrite (tempData_h, size, 1 , fp );
	fclose(fp); // write results to file
	//printf("denoised data =%f\n", tempData_h[53]);
	free(tempData_h); // free space
}
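// --- Editor's note: a variant of outputData that takes an element count keeps
// the byte/element units explicit and lets fwrite's count match the data type.
// A minimal sketch; `outputDataElems` is our name, not part of the original code,
// and it relies on the cutil/viscous_global.h includes already present above.

void outputDataElems(const float *d_src, size_t numElems, const char *filename)
{
    size_t bytes = numElems * sizeof(float);          // units made explicit
    float *h_buf = (float*) malloc(bytes);
    if (h_buf == NULL) { fputs("Memory error", stderr); exit(2); }
    cutilSafeCall( hipMemcpy(h_buf, d_src, bytes, hipMemcpyDeviceToHost) );
    FILE *fp = fopen(filename, "wb");
    if (fp == NULL) { fputs("Can not open file to write results.", stderr); free(h_buf); exit(1); }
    fwrite(h_buf, sizeof(float), numElems, fp);       // size/count arguments match the units
    fclose(fp);
    free(h_buf);
}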
3ddba9bba7cbad28f7b4fb92ac753cd330bea5dc.cu
/*******************************************************************
c* Multimodal Deformable Image Registration                        *
c* via Mutual Information or Bhattacharyya Distance                *
c* Version: 1.0                                                    *
c* Language: C, CUDA                                               *
c*                                                                 *
c* Developer: Yifei Lou                                            *
c* Email: yifei.lou@ece.gatech.edu                                 *
c*                                                                 *
c* School of Electrical and Computer Engineering                   *
c* Georgia Institute of Technology                                 *
c* Atlanta, GA, 30318                                              *
c* Website: http://groups.bme.gatech.edu/groups/bil/               *
c*                                                                 *
c* Copyright (c) 2011                                              *
c* All rights reserved.                                            *
c*                                                                 *
c* Permission to use, copy, or modify this code and its            *
c* documentation for scientific purpose is hereby granted          *
c* without fee, provided that this copyright notice appear in      *
c* all copies and that both that copyright notice and this         *
c* permission notice appear in supporting documentation. The use   *
c* for commercial purposes is prohibited without permission.       *
c*                                                                 *
c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND          *
c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,     *
c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF        *
c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE        *
c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR            *
c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,    *
c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT*
c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF*
c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED *
c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT     *
c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING  *
c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF  *
c* THE POSSIBILITY OF SUCH DAMAGE.                                 *
c*                                                                 *
c******************************************************************/
/*******************************************************************
c* Short description                                               *
c* Finalize the reconstruction on the current scale and for the    *
c* entire program, output results, release memory spaces for       *
c* global variables, etc.                                          *
c******************************************************************/
#include <iostream>
#include <stdio.h>
#include <cutil.h>
#include <cutil_inline_runtime.h>
#include "viscous_global.h"

void fina()
{
	// map output image to its original scale
	nblocks.x = NBLOCKX;
	nblocks.y = ((1 + (NX0*NY0*NZ0 - 1)/NTHREAD_PER_BLOCK) - 1) / NBLOCKX + 1;
	printf("moving image: max = %f, min = %f\n", max_im_move, min_im_move);
	intensityRescale<<<nblocks, NTHREAD_PER_BLOCK>>>(d_im_move[0], max_im_move, min_im_move, -1);

	// output results
	outputData(d_im_move[0], DATA_SIZE, outputfilename);
	outputData(d_mv_x[0], DATA_SIZE, output_mv_x);
	outputData(d_mv_y[0], DATA_SIZE, output_mv_y);
	outputData(d_mv_z[0], DATA_SIZE, output_mv_z);

	// free up the host and device
	// image pyramid
	for(int scale =0; scale <NSCALE; scale++)
	{
		cudaFree(d_im_move[scale]);
		cudaFree(d_im_static[scale]);
		cudaFree(d_mv_x[scale]);
		cudaFree(d_mv_y[scale]);
		cudaFree(d_mv_z[scale]);
	}
	// Gaussian kernel
	cudaFree(GaussKernelH);
	cudaFree(GaussKernelHx);
	// histogram related
	cudaFree(d_jointHistogram);
	cudaFree(d_jointHistogram_conv);
	cudaFree(d_probx);
	cudaFree(d_proby);
	cudaFree(d_Bsum);
}

void outputData(void *src, int size, const char *outputfilename)
// output data to file; note that `size` is a byte count: it is passed
// unchanged to cudaMemcpy and fwrite below
{
	// void *tempData_h = malloc( size );
	// NOTE: since `size` is already in bytes, sizeof(float)*size over-allocates
	// by a factor of four; harmless, but wasteful.
	float *tempData_h = (float*) malloc (sizeof(float)*size);
	if (tempData_h == NULL) { fputs ("Memory error",stderr); exit (2); }
	cutilSafeCall( cudaMemcpy( tempData_h, src, size, cudaMemcpyDeviceToHost) ); // copy data from GPU to CPU
	FILE *fp;
	fp = fopen(outputfilename,"wb");
	if( fp == NULL ) { std::cout << "Can not open file to write results."; exit(1); }
	fwrite (tempData_h, size, 1 , fp );
	fclose(fp); // write results to file
	//printf("denoised data =%f\n", tempData_h[53]);
	free(tempData_h); // free space
}
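// --- Editor's note: the .hip/.cu pair above differs essentially only in API
// spelling, which is exactly what hipify automates: cuda* runtime calls map
// one-to-one to hip* calls (cudaFree -> hipFree, cudaMemcpy -> hipMemcpy,
// cudaMemcpyDeviceToHost -> hipMemcpyDeviceToHost, ...), and CUDA's launch syntax
//     kernel<<<grid, block, sharedBytes, stream>>>(args...);
// becomes the portable macro form
//     hipLaunchKernelGGL(kernel, grid, block, sharedBytes, stream, args...);
// A minimal self-contained illustration; the `scale` kernel is hypothetical and
// only stands in for intensityRescale above.

#include <hip/hip_runtime.h>

__global__ void scale(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

void launch_both_ways(float *d_x, int n) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA spelling:
    //   scale<<<grid, block, 0, 0>>>(d_x, 2.0f, n);
    // HIP spelling produced by hipify:
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, 2.0f, n);
}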
642e59e182699af241ddc73b7ed61784ff432e04.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "instance_norm_impl.h"

namespace onnxruntime {
namespace cuda {

template <typename T1, typename T2>
__global__ void _InstanceNormKernel(
    const T1* __restrict__ input_data,
    const T1* __restrict__ scale,
    const T1* __restrict__ bias,
    const T2* __restrict__ mean,
    const T2* __restrict__ variance,
    const double variance_correction,
    const double epsilon,
    const fast_divmod fdm_HW,
    const fast_divmod fdm_C,
    T1* __restrict__ output_data,
    const CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  int nc = fdm_HW.div(id);
  int n, c;
  fdm_C.divmod(nc, n, c);

  // Y = scale * (x - mean) / sqrt(variance * variance_correction + epsilon) + B
  output_data[id] = scale[c] * (input_data[id] - (T1)mean[nc]) / _Sqrt((T1)variance[nc] * (T1)variance_correction + (T1)epsilon) + bias[c];
}

template <typename T1, typename T2>
void InstanceNormImpl(
    hipStream_t stream,
    const T1* input_data,
    const T1* scale,
    const T1* bias,
    const T2* mean,
    const T2* variance,
    const double variance_correction,
    const double epsilon,
    const fast_divmod& fdm_HW,
    const fast_divmod& fdm_C,
    T1* output_data,
    size_t N) {
  int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
  hipLaunchKernelGGL(( _InstanceNormKernel<T1, T2>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
      input_data, scale, bias, mean, variance, variance_correction, epsilon, fdm_HW, fdm_C, output_data, (CUDA_LONG)N);
}

#define SPECIALIZED_IMPL(T1, T2) \
  template void InstanceNormImpl<T1, T2>(hipStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* stddev, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count);

SPECIALIZED_IMPL(float, float)
SPECIALIZED_IMPL(double, double)
// When the input data type is float16, the means and variances will flow in as float32 (special case)
SPECIALIZED_IMPL(half, float)

}  // namespace cuda
}  // namespace onnxruntime
642e59e182699af241ddc73b7ed61784ff432e04.cu
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "instance_norm_impl.h"

namespace onnxruntime {
namespace cuda {

template <typename T1, typename T2>
__global__ void _InstanceNormKernel(
    const T1* __restrict__ input_data,
    const T1* __restrict__ scale,
    const T1* __restrict__ bias,
    const T2* __restrict__ mean,
    const T2* __restrict__ variance,
    const double variance_correction,
    const double epsilon,
    const fast_divmod fdm_HW,
    const fast_divmod fdm_C,
    T1* __restrict__ output_data,
    const CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  int nc = fdm_HW.div(id);
  int n, c;
  fdm_C.divmod(nc, n, c);

  // Y = scale * (x - mean) / sqrt(variance * variance_correction + epsilon) + B
  output_data[id] = scale[c] * (input_data[id] - (T1)mean[nc]) / _Sqrt((T1)variance[nc] * (T1)variance_correction + (T1)epsilon) + bias[c];
}

template <typename T1, typename T2>
void InstanceNormImpl(
    cudaStream_t stream,
    const T1* input_data,
    const T1* scale,
    const T1* bias,
    const T2* mean,
    const T2* variance,
    const double variance_correction,
    const double epsilon,
    const fast_divmod& fdm_HW,
    const fast_divmod& fdm_C,
    T1* output_data,
    size_t N) {
  int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
  _InstanceNormKernel<T1, T2><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
      input_data, scale, bias, mean, variance, variance_correction, epsilon, fdm_HW, fdm_C, output_data, (CUDA_LONG)N);
}

#define SPECIALIZED_IMPL(T1, T2) \
  template void InstanceNormImpl<T1, T2>(cudaStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* stddev, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count);

SPECIALIZED_IMPL(float, float)
SPECIALIZED_IMPL(double, double)
// When the input data type is float16, the means and variances will flow in as float32 (special case)
SPECIALIZED_IMPL(half, float)

}  // namespace cuda
}  // namespace onnxruntime
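// --- Editor's note: the kernel above flattens the NCHW tensor and recovers the
// (n, c) pair with two divisions: nc = id / (H*W), then c = nc % C (via the
// fast_divmod helpers). A plain CPU reference makes both the index math and the
// formula easy to check. A sketch only, assuming NCHW float data; names and
// signature are ours, not ONNX Runtime API.

#include <cmath>

void instance_norm_ref(const float* x, const float* scale, const float* bias,
                       const float* mean, const float* var,   // one entry per (n, c) pair
                       double var_correction, double epsilon,
                       int Nb, int C, int HW, float* y) {
    for (int nc = 0; nc < Nb * C; ++nc) {        // kernel: nc = fdm_HW.div(id)
        int c = nc % C;                          // kernel: fdm_C.divmod(nc, n, c)
        float denom = std::sqrt((float)(var[nc] * var_correction + epsilon));
        for (int i = 0; i < HW; ++i) {
            int id = nc * HW + i;
            // Y = scale * (x - mean) / sqrt(var * correction + eps) + bias
            y[id] = scale[c] * (x[id] - mean[nc]) / denom + bias[c];
        }
    }
}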
2458682145457e66d4e2b798deaf41fcc6b19964.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
  Algorithms and Data Structures in C
  Optimizing the N-Queens problem step by step
  Kyodo News, Information Technology Bureau
  Iichiro Suzuki (suzuki.iichiro@kyodonews.jp)

  Compile
  $ nvcc CUDA01_N-Queen.cu -o CUDA01_N-Queen

  Run
  $ ./CUDA01_N-Queen

  1. Brute force (exhaustive search)
  Systematically enumerate every candidate solution and check whether each
  candidate actually solves the problem.
  (Note) This merely enumerates, recursively, the combinations that place one
  queen in each row; generating the combinations alone does not solve the
  8-queens problem.

  Execution results
          :
          :
  16777209: 7 7 7 7 7 7 7 0
  16777210: 7 7 7 7 7 7 7 1
  16777211: 7 7 7 7 7 7 7 2
  16777212: 7 7 7 7 7 7 7 3
  16777213: 7 7 7 7 7 7 7 4
  16777214: 7 7 7 7 7 7 7 5
  16777215: 7 7 7 7 7 7 7 6
  16777216: 7 7 7 7 7 7 7 7
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//
long Total=0 ;      // total number of solutions
long Unique=0;      // number of unique solutions
int down[2*MAX-1];  //down:flagA  column placement flag
int left[2*MAX-1];  //left:flagB  diagonal placement flag
int right[2*MAX-1]; //right:flagC diagonal placement flag
long TOTAL=0;
long UNIQUE=0;
int aBoard[MAX];
int fA[2*MAX-1]; // one queen per column
int fB[2*MAX-1]; // one queen per diagonal
int fC[2*MAX-1]; // one queen per diagonal
//
__global__ void solve_nqueen_cuda_kernel_bt_bm(
  int n,int mark,
  unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,
  unsigned int* results,int totalCond){
  const int tid=threadIdx.x,bid=blockIdx.x,idx=bid*blockDim.x+tid;
  __shared__ unsigned int down[THREAD_NUM][10],left[THREAD_NUM][10],right[THREAD_NUM][10],
  bitmap[THREAD_NUM][10],sum[THREAD_NUM];
  const unsigned int mask=(1<<n)-1;int total=0,i=0;unsigned int bit;
  if(idx<totalCond){
    down[tid][i]=totalDown[idx];
    left[tid][i]=totalLeft[idx];
    right[tid][i]=totalRight[idx];
    bitmap[tid][i]=down[tid][i]|left[tid][i]|right[tid][i];
    while(i>=0){
      if((bitmap[tid][i]&mask)==mask){i--;}
      else{
        bit=(bitmap[tid][i]+1)&~bitmap[tid][i];
        bitmap[tid][i]|=bit;
        if((bit&mask)!=0){
          if(i+1==mark){total++;i--;}
          else{
            down[tid][i+1]=down[tid][i]|bit;
            left[tid][i+1]=(left[tid][i]|bit)<<1;
            right[tid][i+1]=(right[tid][i]|bit)>>1;
            bitmap[tid][i+1]=(down[tid][i+1]|left[tid][i+1]|right[tid][i+1]);
            i++;
          }
        }else{i--;}
      }
    }
    sum[tid]=total;
  }else{sum[tid]=0;}
  __syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}
  __syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
  __syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
  __syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
  __syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
  __syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
  __syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
  __syncthreads();if(tid==0){results[bid]=sum[0];}
}
//
long long solve_nqueen_cuda(int n,int steps) {
  unsigned int down[32];unsigned int left[32];unsigned int right[32];
  unsigned int m[32];unsigned int bit;
  if(n<=0||n>32){return 0;}
  unsigned int* totalDown=new unsigned int[steps];
  unsigned int* totalLeft=new unsigned int[steps];
  unsigned int* totalRight=new unsigned int[steps];
  unsigned int* results=new unsigned int[steps];
  unsigned int* downCuda;unsigned int* leftCuda;unsigned int* rightCuda;
  unsigned int* resultsCuda;
  hipMalloc((void**) &downCuda,sizeof(int)*steps);
  hipMalloc((void**) &leftCuda,sizeof(int)*steps);
  hipMalloc((void**) &rightCuda,sizeof(int)*steps);
  hipMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
  const unsigned int mask=(1<<n)-1;
  const unsigned int mark=n>11?n-10:2;
  long long total=0;int totalCond=0;
  int i=0,j;down[0]=0;left[0]=0;right[0]=0;m[0]=0;bool computed=false;
  for(j=0;j<n/2;j++){
    bit=(1<<j);m[0]|=bit;
    down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
    m[1]=(down[1]|left[1]|right[1]);
    i=1;
    while(i>0){
      if((m[i]&mask)==mask){i--;}
      else{
        bit=(m[i]+1)&~m[i];m[i]|=bit;
        if((bit&mask)!=0){
          down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
          m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
          i++;
          if(i==mark){
totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i]; totalCond++; if(totalCond==steps){ if(computed){ hipMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } hipMemcpy(downCuda,totalDown,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,hipMemcpyHostToDevice); /** backTrack+bitmap*/ hipLaunchKernelGGL(( solve_nqueen_cuda_kernel_bt_bm), dim3(steps/THREAD_NUM),dim3(THREAD_NUM), 0, 0, n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); computed=true;totalCond=0; } i--; } }else{i --;} } } } if(computed){ hipMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } hipMemcpy(downCuda,totalDown,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,hipMemcpyHostToDevice); /** backTrack+bitmap*/ hipLaunchKernelGGL(( solve_nqueen_cuda_kernel_bt_bm), dim3(steps/THREAD_NUM),dim3(THREAD_NUM), 0, 0, n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); hipMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} total*=2; if(n%2==1){ computed=false;totalCond=0;bit=(1<<(n-1)/2);m[0]|=bit; down[1]=bit;left[1]=bit<<1;right[1]=bit>>1; m[1]=(down[1]|left[1]|right[1]); i=1; while(i>0){ if((m[i]&mask)==mask){i--;} else{ bit=(m[i]+1)&~m[i];m[i]|=bit; if((bit&mask)!=0){ down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1; m[i+1]=(down[i+1]|left[i+1]|right[i+1]); i++; if(i==mark){ totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i]; totalCond++; if(totalCond==steps){ if(computed){ hipMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } hipMemcpy(downCuda,totalDown,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,hipMemcpyHostToDevice); /** backTrack+bitmap*/ hipLaunchKernelGGL(( solve_nqueen_cuda_kernel_bt_bm), dim3(steps/THREAD_NUM),dim3(THREAD_NUM), 0, 0, n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); computed=true;totalCond=0; } i--; } }else{i --;} } } if(computed){ hipMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } hipMemcpy(downCuda,totalDown,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,hipMemcpyHostToDevice); hipMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,hipMemcpyHostToDevice); /** backTrack+bitmap*/ hipLaunchKernelGGL(( solve_nqueen_cuda_kernel_bt_bm), dim3(steps/THREAD_NUM),dim3(THREAD_NUM), 0, 0, n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); hipMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,hipMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} } hipFree(downCuda);hipFree(leftCuda);hipFree(rightCuda);hipFree(resultsCuda); delete[] totalDown;delete[] totalLeft;delete[] totalRight;delete[] results; return total; } /** CUDA **/ bool 
InitCUDA(){
  int count;
  hipGetDeviceCount(&count);
  if(count==0){fprintf(stderr,"There is no device.\n");return false;}
  int i;
  for(i=0;i<count;i++){
    hipDeviceProp_t prop;
    if(hipGetDeviceProperties(&prop,i)==hipSuccess){if(prop.major>=1){break;} }
  }
  if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
  hipSetDevice(i);
  return true;
}
// methods other than main() are listed here
void NQueen(int row,int size);
void TimeFormat(clock_t utime,char *form);
// logic method
void NQueen(int row,int size){
  if(row==size){
    // count when the last row has been reached
    TOTAL++;
  }else{
    for(int i=0;i<size;i++){
      aBoard[row]=i;
      // check the column and both diagonals
      if(fA[i]==0&&fB[row-i+(size-1)]==0&&fC[row+i]==0){
        fA[i]=fB[row-i+(size-1)]=fC[row+i]=1;
        NQueen(row+1,size); // recurse
        fA[i]=fB[row-i+(size-1)]=fC[row+i]=0;
      }
    }
  }
}
// print the elapsed time in hh:mm:ss.ms format
void TimeFormat(clock_t utime,char *form){
  int dd,hh,mm;
  float ftime,ss;
  ftime=(float)utime/CLOCKS_PER_SEC;
  mm=(int)ftime/60;
  ss=ftime-(int)(mm*60);
  dd=mm/(24*60);
  mm=mm%(24*60);
  hh=mm/60;
  mm=mm%60;
  if(dd) sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
  else if(hh) sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
  else if(mm) sprintf(form," %2d:%05.2f",mm,ss);
  else sprintf(form," %5.2f",ss);
}
// main method
int main(int argc,char** argv) {
  bool cpu=true,cpur=true,gpu=true;
  int argstart=1,steps=24576;
  /** parameter handling */
  if(argc>=2&&argv[1][0]=='-'){
    if(argv[1][1]=='c'||argv[1][1]=='C'){gpu=false;cpur=false;}
    else if(argv[1][1]=='r'||argv[1][1]=='R'){cpu=false;gpu=false;}
    else if(argv[1][1]=='g'||argv[1][1]=='G'){cpu=false;cpur=false;}
    argstart=2;
  }
  if(argc<argstart){
    printf("Usage: %s [-c|-g|-r] n steps\n",argv[0]);
    printf(" -c: CPU only\n");
    printf(" -r: CPUR only\n");
    printf(" -g: GPU only\n");
    printf("Default to 8 queen\n");
  }
  /** output and execution */
  /** CPU */
  if(cpu){
    printf("\n\n3. Backtracking");
  }
  /** CPUR */
  if(cpur){
    printf("\n\n3. Backtracking");
    clock_t st;  // for timing
    char t[20];  // holds hh:mm:ss.ms
    int min=4;   // minimum (starting) value of N
    printf("%s\n"," N: Total Unique hh:mm:ss.ms");
    for(int i=min;i<=MAX;i++){
      TOTAL=0;
      UNIQUE=0;  // initialize
      for(int j=0;j<i;j++){ aBoard[j]=j; }  // initialize the board
      st=clock();  // start timing
      NQueen(0,i);
      TimeFormat(clock()-st,t);  // stop timing
      printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);  // print
    }
    return 0;
  }
  /** GPU */
  if(gpu){
    if(!InitCUDA()){return 0;}
    int min=4;int targetN=18;
    struct timeval t0;struct timeval t1;int ss;int ms;int dd;
    printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    for(int i=min;i<=targetN;i++){
      gettimeofday(&t0,NULL);  // start timing
      Total=solve_nqueen_cuda(i,steps);
      gettimeofday(&t1,NULL);  // stop timing
      if (t1.tv_usec<t0.tv_usec) {
        dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
        ss=(t1.tv_sec-t0.tv_sec-1)%86400;
        ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
      } else {
        dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
        ss=(t1.tv_sec-t0.tv_sec)%86400;
        ms=(t1.tv_usec-t0.tv_usec+500)/10000;
      }
      int hh=ss/3600;
      int mm=(ss-hh*3600)/60;
      ss%=60;
      printf("%2d:%18ld%18ld%12.2d:%02d:%02d:%02d.%02d\n",
        i,Total,Unique,dd,hh,mm,ss,ms);
    }
  }
  return 0;
}
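// --- Editor's note: both the host enumeration and the device kernel above
// advance with bit = (m + 1) & ~m. Adding 1 carries through the trailing 1-bits
// of m, so ANDing with ~m leaves exactly the lowest 0 bit, i.e. the first column
// still free in the current row; OR-ing it back into m marks it as tried. A tiny
// standalone demo of the trick (the example values are ours):

#include <stdio.h>

int main(void) {
    unsigned int m = 0xB7;                  // 10110111: columns already excluded
    while ((m & 0xFF) != 0xFF) {            // same exit test as the solver, mask = (1<<n)-1
        unsigned int bit = (m + 1) & ~m;    // lowest free column
        printf("trying column bit 0x%02X\n", bit);  // prints 0x08, then 0x40
        m |= bit;                           // mark the column as tried
    }
    return 0;
}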
2458682145457e66d4e2b798deaf41fcc6b19964.cu
/**
  Algorithms and Data Structures in C
  Optimizing the N-Queens problem step by step
  Kyodo News, Information Technology Bureau
  Iichiro Suzuki (suzuki.iichiro@kyodonews.jp)

  Compile
  $ nvcc CUDA01_N-Queen.cu -o CUDA01_N-Queen

  Run
  $ ./CUDA01_N-Queen

  1. Brute force (exhaustive search)
  Systematically enumerate every candidate solution and check whether each
  candidate actually solves the problem.
  (Note) This merely enumerates, recursively, the combinations that place one
  queen in each row; generating the combinations alone does not solve the
  8-queens problem.

  Execution results
          :
          :
  16777209: 7 7 7 7 7 7 7 0
  16777210: 7 7 7 7 7 7 7 1
  16777211: 7 7 7 7 7 7 7 2
  16777212: 7 7 7 7 7 7 7 3
  16777213: 7 7 7 7 7 7 7 4
  16777214: 7 7 7 7 7 7 7 5
  16777215: 7 7 7 7 7 7 7 6
  16777216: 7 7 7 7 7 7 7 7
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
//
long Total=0 ;      // total number of solutions
long Unique=0;      // number of unique solutions
int down[2*MAX-1];  //down:flagA  column placement flag
int left[2*MAX-1];  //left:flagB  diagonal placement flag
int right[2*MAX-1]; //right:flagC diagonal placement flag
long TOTAL=0;
long UNIQUE=0;
int aBoard[MAX];
int fA[2*MAX-1]; // one queen per column
int fB[2*MAX-1]; // one queen per diagonal
int fC[2*MAX-1]; // one queen per diagonal
//
__global__ void solve_nqueen_cuda_kernel_bt_bm(
  int n,int mark,
  unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,
  unsigned int* results,int totalCond){
  const int tid=threadIdx.x,bid=blockIdx.x,idx=bid*blockDim.x+tid;
  __shared__ unsigned int down[THREAD_NUM][10],left[THREAD_NUM][10],right[THREAD_NUM][10],
  bitmap[THREAD_NUM][10],sum[THREAD_NUM];
  const unsigned int mask=(1<<n)-1;int total=0,i=0;unsigned int bit;
  if(idx<totalCond){
    down[tid][i]=totalDown[idx];
    left[tid][i]=totalLeft[idx];
    right[tid][i]=totalRight[idx];
    bitmap[tid][i]=down[tid][i]|left[tid][i]|right[tid][i];
    while(i>=0){
      if((bitmap[tid][i]&mask)==mask){i--;}
      else{
        bit=(bitmap[tid][i]+1)&~bitmap[tid][i];
        bitmap[tid][i]|=bit;
        if((bit&mask)!=0){
          if(i+1==mark){total++;i--;}
          else{
            down[tid][i+1]=down[tid][i]|bit;
            left[tid][i+1]=(left[tid][i]|bit)<<1;
            right[tid][i+1]=(right[tid][i]|bit)>>1;
            bitmap[tid][i+1]=(down[tid][i+1]|left[tid][i+1]|right[tid][i+1]);
            i++;
          }
        }else{i--;}
      }
    }
    sum[tid]=total;
  }else{sum[tid]=0;}
  __syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}
  __syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
  __syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
  __syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
  __syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
  __syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
  __syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
  __syncthreads();if(tid==0){results[bid]=sum[0];}
}
//
long long solve_nqueen_cuda(int n,int steps) {
  unsigned int down[32];unsigned int left[32];unsigned int right[32];
  unsigned int m[32];unsigned int bit;
  if(n<=0||n>32){return 0;}
  unsigned int* totalDown=new unsigned int[steps];
  unsigned int* totalLeft=new unsigned int[steps];
  unsigned int* totalRight=new unsigned int[steps];
  unsigned int* results=new unsigned int[steps];
  unsigned int* downCuda;unsigned int* leftCuda;unsigned int* rightCuda;
  unsigned int* resultsCuda;
  cudaMalloc((void**) &downCuda,sizeof(int)*steps);
  cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
  cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
  cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
  const unsigned int mask=(1<<n)-1;
  const unsigned int mark=n>11?n-10:2;
  long long total=0;int totalCond=0;
  int i=0,j;down[0]=0;left[0]=0;right[0]=0;m[0]=0;bool computed=false;
  for(j=0;j<n/2;j++){
    bit=(1<<j);m[0]|=bit;
    down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
    m[1]=(down[1]|left[1]|right[1]);
    i=1;
    while(i>0){
      if((m[i]&mask)==mask){i--;}
      else{
        bit=(m[i]+1)&~m[i];m[i]|=bit;
        if((bit&mask)!=0){
down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1; m[i+1]=(down[i+1]|left[i+1]|right[i+1]); i++; if(i==mark){ totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i]; totalCond++; if(totalCond==steps){ if(computed){ cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice); /** backTrack+bitmap*/ solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); computed=true;totalCond=0; } i--; } }else{i --;} } } } if(computed){ cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice); /** backTrack+bitmap*/ solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} total*=2; if(n%2==1){ computed=false;totalCond=0;bit=(1<<(n-1)/2);m[0]|=bit; down[1]=bit;left[1]=bit<<1;right[1]=bit>>1; m[1]=(down[1]|left[1]|right[1]); i=1; while(i>0){ if((m[i]&mask)==mask){i--;} else{ bit=(m[i]+1)&~m[i];m[i]|=bit; if((bit&mask)!=0){ down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1; m[i+1]=(down[i+1]|left[i+1]|right[i+1]); i++; if(i==mark){ totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i]; totalCond++; if(totalCond==steps){ if(computed){ cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice); /** backTrack+bitmap*/ solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); computed=true;totalCond=0; } i--; } }else{i --;} } } if(computed){ cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} computed=false; } cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice); cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice); /** backTrack+bitmap*/ solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond); cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost); for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];} } cudaFree(downCuda);cudaFree(leftCuda);cudaFree(rightCuda);cudaFree(resultsCuda); delete[] totalDown;delete[] totalLeft;delete[] totalRight;delete[] results; return 
total; }
/** CUDA initialization **/
bool InitCUDA(){
  int count;
  cudaGetDeviceCount(&count);
  if(count==0){fprintf(stderr,"There is no device.\n");return false;}
  int i;
  for(i=0;i<count;i++){
    cudaDeviceProp prop;
    if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
  }
  if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
  cudaSetDevice(i);
  return true;
}
// methods other than main() are listed here
void NQueen(int row,int size);
void TimeFormat(clock_t utime,char *form);
// logic method
void NQueen(int row,int size){
  if(row==size){
    // count when the last row has been reached
    TOTAL++;
  }else{
    for(int i=0;i<size;i++){
      aBoard[row]=i;
      // check the column and both diagonals
      if(fA[i]==0&&fB[row-i+(size-1)]==0&&fC[row+i]==0){
        fA[i]=fB[row-i+(size-1)]=fC[row+i]=1;
        NQueen(row+1,size); // recurse
        fA[i]=fB[row-i+(size-1)]=fC[row+i]=0;
      }
    }
  }
}
// print the elapsed time in hh:mm:ss.ms format
void TimeFormat(clock_t utime,char *form){
  int dd,hh,mm;
  float ftime,ss;
  ftime=(float)utime/CLOCKS_PER_SEC;
  mm=(int)ftime/60;
  ss=ftime-(int)(mm*60);
  dd=mm/(24*60);
  mm=mm%(24*60);
  hh=mm/60;
  mm=mm%60;
  if(dd) sprintf(form,"%4d %02d:%02d:%05.2f",dd,hh,mm,ss);
  else if(hh) sprintf(form," %2d:%02d:%05.2f",hh,mm,ss);
  else if(mm) sprintf(form," %2d:%05.2f",mm,ss);
  else sprintf(form," %5.2f",ss);
}
// main method
int main(int argc,char** argv) {
  bool cpu=true,cpur=true,gpu=true;
  int argstart=1,steps=24576;
  /** parameter handling */
  if(argc>=2&&argv[1][0]=='-'){
    if(argv[1][1]=='c'||argv[1][1]=='C'){gpu=false;cpur=false;}
    else if(argv[1][1]=='r'||argv[1][1]=='R'){cpu=false;gpu=false;}
    else if(argv[1][1]=='g'||argv[1][1]=='G'){cpu=false;cpur=false;}
    argstart=2;
  }
  if(argc<argstart){
    printf("Usage: %s [-c|-g|-r] n steps\n",argv[0]);
    printf(" -c: CPU only\n");
    printf(" -r: CPUR only\n");
    printf(" -g: GPU only\n");
    printf("Default to 8 queen\n");
  }
  /** output and execution */
  /** CPU */
  if(cpu){
    printf("\n\n3. Backtracking");
  }
  /** CPUR */
  if(cpur){
    printf("\n\n3. Backtracking");
    clock_t st;  // for timing
    char t[20];  // holds hh:mm:ss.ms
    int min=4;   // minimum (starting) value of N
    printf("%s\n"," N: Total Unique hh:mm:ss.ms");
    for(int i=min;i<=MAX;i++){
      TOTAL=0;
      UNIQUE=0;  // initialize
      for(int j=0;j<i;j++){ aBoard[j]=j; }  // initialize the board
      st=clock();  // start timing
      NQueen(0,i);
      TimeFormat(clock()-st,t);  // stop timing
      printf("%2d:%13ld%16ld%s\n",i,TOTAL,UNIQUE,t);  // print
    }
    return 0;
  }
  /** GPU */
  if(gpu){
    if(!InitCUDA()){return 0;}
    int min=4;int targetN=18;
    struct timeval t0;struct timeval t1;int ss;int ms;int dd;
    printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    for(int i=min;i<=targetN;i++){
      gettimeofday(&t0,NULL);  // start timing
      Total=solve_nqueen_cuda(i,steps);
      gettimeofday(&t1,NULL);  // stop timing
      if (t1.tv_usec<t0.tv_usec) {
        dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
        ss=(t1.tv_sec-t0.tv_sec-1)%86400;
        ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
      } else {
        dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
        ss=(t1.tv_sec-t0.tv_sec)%86400;
        ms=(t1.tv_usec-t0.tv_usec+500)/10000;
      }
      int hh=ss/3600;
      int mm=(ss-hh*3600)/60;
      ss%=60;
      printf("%2d:%18ld%18ld%12.2d:%02d:%02d:%02d.%02d\n",
        i,Total,Unique,dd,hh,mm,ss,ms);
    }
  }
  return 0;
}
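// --- Editor's note: for reference, the control flow of solve_nqueen_cuda above
// in outline. The host enumerates the first `mark` rows (mark = n - 10 for
// n > 11), buffers each partial state as a (down, left, right) triple, and
// drains the buffer to the GPU in batches of `steps`; the kernel finishes the
// remaining rows and reduces per-block counts. This is a paraphrase of the code
// above, not a drop-in replacement:
//
//   mark = (n > 11) ? n - 10 : 2;           // host explores the first `mark` rows
//   for each host-side partial placement of `mark` queens:
//       buffer (down, left, right) into totalDown/totalLeft/totalRight
//       if the buffer holds `steps` states:
//           copy the buffer to the device
//           solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM, THREAD_NUM>>>(...)
//           accumulate the per-block counts from `results`
//   flush the final partial buffer the same way
//   total *= 2;                              // mirror symmetry: only the first
//                                            // n/2 first-row columns are enumerated
//   if (n is odd) repeat for the middle column, without doubling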
54c403bfa07005b51950c0d200a588d94a11bb2f.hip
// !!! This is a file automatically generated by hipify!!! #include "common/xtestrunner.h" #include <algorithm> #include <xcbb/xcbb.h> class RadixSortTest: public CudaTest { protected: RadixSortTest() {} virtual ~RadixSortTest() {} virtual void SetUp() { CudaTest::SetUp(); std::srand(time(0)); } virtual void TearDown() {} protected: template <int RADIX_DIGITS> void CreateSample0(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = 0; } template <int RADIX_DIGITS> void CreateSample1(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 4; } template <int RADIX_DIGITS> void CreateSample2(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample3(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample4(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample5(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample6(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 24; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 28; } template <int RADIX_DIGITS> void CreateSample7(std::vector<uint>& key) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; key[(rand() % 220460)] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; } template <int RADIX_DIGITS> void CreateSample8(std::vector<uint>& key) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; key[(rand() % 220460)] += (rand() % RADIX_DIGITS) << 28; } }; 
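// --- Editor's note: every TEST_F below repeats the same harness (upload the
// keys, std::sort a host reference, Enact the GPU sort, download, compare) and
// varies only the sample generator and the element count. A sketch of that body
// factored into one helper; `RunSortTest` is our name, not part of xcbb, and it
// relies on the includes already present in this file.

template <typename SampleFn>
void RunSortTest(int numElements, SampleFn createSample)
{
    std::vector<uint> data(numElements), result(numElements);
    createSample(data);                       // e.g. CreateSample3<RADIX_DIGITS>
    uint* d_data;
    checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements));
    checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice));
    std::sort(data.begin(), data.end());      // reference serial sort
    RadixsortStorage<uint> storage(numElements);
    storage.InitDeviceStorage(d_data);
    RadixsortEnactor<uint> sorter(numElements);
    sorter.Enact(storage);                    // GPU radix sort under test
    checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_data));
    EXPECT_RANGE_EQ(data, result);            // compare with the reference
}

// Usage inside a test body:
//   RunSortTest(128 * 512 * 200, [this](std::vector<uint>& d){ CreateSample3<16>(d); });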
/*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort0) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample0<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort1) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample1<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort2) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample2<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, 
sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort3) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample3<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort4) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample4<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // 
Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort5) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample5<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort6) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample6<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements0) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 4*512 + 200; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, 
data.size()); ASSERT_EQ(numElements, result.size()); CreateSample0<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements1) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 7*512 + 256; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample1<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements2) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 4*512 + 11; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample2<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; 
timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements3) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 4*512 + 23; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample3<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements4) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 4*512 + 511; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample4<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements5) { const int 
RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 4*512 + 1; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample5<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements6) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 7*512 + 317; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample6<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements7) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 8; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 7*512 + 317; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample7<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort 
std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements8) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 5; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 7*512 + 317; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample8<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_data)); // Compare with reference solution ASSERT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /********************************************************************************************************************* * * * *********************************************************************************************************************/ class RadixSortKeyValueTest: public CudaTest { protected: RadixSortKeyValueTest() {} virtual ~RadixSortKeyValueTest() {} virtual void SetUp() { CudaTest::SetUp(); std::srand(time(0)); } virtual void TearDown() {} protected: template <int RADIX_DIGITS> void CreateSample0(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = 0; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample1(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample2(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample3(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % 
RADIX_DIGITS) << 8; key[(rand() % (key.size() - 2))] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 28; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample4(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; key[(rand() % (key.size() - 2))] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 28; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample5(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample6(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 28; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample7(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } template <int RADIX_DIGITS> void CreateSample8(std::vector<uint>& key, std::vector<uint>& val) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for 
(int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; key[(rand() % (key.size() - 2))] += (rand() % RADIX_DIGITS) << 28; for (int i = 0; i < key.size(); ++i) val[i] = key[i]; } }; /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue0) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 5; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample0<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue1) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 3; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 100; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample1<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create 
sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue2) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 8; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 512 + 100; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample2<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue3) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 7; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 2*512 + 32; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample3<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * 
numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue4) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 7; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 10*512 + 300; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample4<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue5) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 5; const int numBlocks = 
128; const int numElements = numBlocks * 512 * TILES + 7*512 + 500; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample5<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue6) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 512 + 7; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample6<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with 
reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue7) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 512 + 7; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample7<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort enactor RadixsortEnactor<uint, uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_keys)); checkCudaErrors(hipFree(d_values)); // Compare with reference solution EXPECT_RANGE_EQ(keys, resultkeys); EXPECT_RANGE_EQ(keys, resultvalues); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortKeyValueTest, KeyValue8) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 512 + 7; std::vector<uint> keys(numElements); std::vector<uint> values(numElements); std::vector<uint> resultkeys(numElements); std::vector<uint> resultvalues(numElements); ASSERT_EQ(numElements, keys.size()); ASSERT_EQ(numElements, values.size()); ASSERT_EQ(numElements, resultkeys.size()); ASSERT_EQ(numElements, resultvalues.size()); CreateSample8<RADIX_DIGITS>(keys, values); uint* d_keys; checkCudaErrors(hipMalloc((void**) &d_keys, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); uint* d_values; checkCudaErrors(hipMalloc((void**) &d_values, sizeof(uint) * numElements)); checkCudaErrors(hipMemcpy(d_values, values.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice)); // Reference serial sort std::sort(keys.begin(), keys.end()); // Initialize sort storage RadixsortStorage<uint, uint> storage(numElements); storage.InitDeviceStorage(d_keys, d_values); // Create sort 
enactor
    RadixsortEnactor<uint, uint> sorter(numElements);

    // Perform radix sort algorithm
    CudaDeviceTimer timer;
    timer.Start();
    sorter.Enact(storage);
    timer.Stop();

    // Copy the sorted keys and values back to the host
    checkCudaErrors(hipMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_keys));
    checkCudaErrors(hipFree(d_values));

    // Compare with reference solution: the values were initialized equal to the
    // keys, so both outputs must match the serially sorted keys
    EXPECT_RANGE_EQ(keys, resultkeys);
    EXPECT_RANGE_EQ(keys, resultvalues);

    printf("Problem: %d\n", numElements);
    printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

int main(int argc, char **argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
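/***********************************************************************************
 * NOTE (editorial sketch): every keys-only test above repeats the same
 * allocate / copy / enact / verify sequence. A minimal helper of the following
 * shape could express that boilerplate once. "RunKeysOnlyTest" is a
 * hypothetical name introduced here for illustration; it is not part of xcbb.
 ***********************************************************************************/
static void RunKeysOnlyTest(std::vector<uint>& data)
{
    const int numElements = (int) data.size();
    std::vector<uint> result(numElements);

    // Push array of keys to device
    uint* d_data;
    checkCudaErrors(hipMalloc((void**) &d_data, sizeof(uint) * numElements));
    checkCudaErrors(hipMemcpy(d_data, data.data(), sizeof(uint) * numElements, hipMemcpyHostToDevice));

    // Reference serial sort
    std::sort(data.begin(), data.end());

    // Initialize sort storage and create the sort enactor
    RadixsortStorage<uint> storage(numElements);
    storage.InitDeviceStorage(d_data);
    RadixsortEnactor<uint> sorter(numElements);

    // Perform radix sort algorithm
    CudaDeviceTimer timer;
    timer.Start();
    sorter.Enact(storage);
    timer.Stop();

    // Copy the sorted keys back to the host
    checkCudaErrors(hipMemcpy(result.data(), d_data, sizeof(uint) * numElements, hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_data));

    // Compare with reference solution
    EXPECT_RANGE_EQ(data, result);
    printf("Problem: %d\n", numElements);
    printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}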
54c403bfa07005b51950c0d200a588d94a11bb2f.cu
#include "common/xtestrunner.h" #include <algorithm> #include <xcbb/xcbb.h> class RadixSortTest: public CudaTest { protected: RadixSortTest() {} virtual ~RadixSortTest() {} virtual void SetUp() { CudaTest::SetUp(); std::srand(time(0)); } virtual void TearDown() {} protected: template <int RADIX_DIGITS> void CreateSample0(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = 0; } template <int RADIX_DIGITS> void CreateSample1(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 4; } template <int RADIX_DIGITS> void CreateSample2(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample3(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample4(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample5(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 0; } template <int RADIX_DIGITS> void CreateSample6(std::vector<uint>& data) { for (int i = 0; i < data.size(); ++i) data[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 24; for (int i = 0; i < data.size(); ++i) data[i] += (rand() % RADIX_DIGITS) << 28; } template <int RADIX_DIGITS> void CreateSample7(std::vector<uint>& key) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; key[(rand() % 220460)] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; } template <int RADIX_DIGITS> void CreateSample8(std::vector<uint>& key) { for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20; for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24; key[(rand() % 220460)] += (rand() % RADIX_DIGITS) << 28; } }; 
/*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort0) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample0<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort1) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample1<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort2) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample2<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; 
checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort3) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample3<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort4) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample4<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, 
cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort5) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample5<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, RadixSort6) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 200; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES; std::vector<uint> data(numElements); std::vector<uint> result(numElements); ASSERT_EQ(numElements, data.size()); ASSERT_EQ(numElements, result.size()); CreateSample6<RADIX_DIGITS>(data); // Push array of keys to device uint* d_data; checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements)); checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice)); // Reference serial sort std::sort(data.begin(), data.end()); // Initialize sort storage RadixsortStorage<uint> storage(numElements); storage.InitDeviceStorage(d_data); // Create sort enactor RadixsortEnactor<uint> sorter(numElements); // Perform radix sort algorithm CudaDeviceTimer timer; timer.Start(); sorter.Enact(storage); timer.Stop(); // Get scanned array back to host checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_data)); // Compare with reference solution EXPECT_RANGE_EQ(data, result); printf("Problem: %d\n", numElements); printf("Time: %.3f [ms]\n", timer.ElapsedTime()); } /*********************************************************************************** * ***********************************************************************************/ TEST_F(RadixSortTest, WithExtraElements0) { const int RADIX_BITS = 4; const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILES = 10; const int numBlocks = 128; const int numElements = numBlocks * 512 * TILES + 4*512 + 200; std::vector<uint> 
data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample0<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements1) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 7*512 + 256;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample1<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements2) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 4*512 + 11;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample2<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements3) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 4*512 + 23;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample3<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements4) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 4*512 + 511;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample4<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements5) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 4*512 + 1;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample5<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements6) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 200;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 7*512 + 317;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample6<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements7) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 8;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 7*512 + 317;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample7<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortTest, WithExtraElements8) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 5;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 7*512 + 317;
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  ASSERT_EQ(numElements, data.size());
  ASSERT_EQ(numElements, result.size());
  CreateSample8<RADIX_DIGITS>(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Initialize sort storage
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  // Create sort enactor
  RadixsortEnactor<uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted array back to host
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  // Compare with reference solution
  ASSERT_RANGE_EQ(data, result);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/*********************************************************************************************************************
 *
 *
 *
 *********************************************************************************************************************/
class RadixSortKeyValueTest: public CudaTest {
 protected:
  RadixSortKeyValueTest() {}
  virtual ~RadixSortKeyValueTest() {}
  virtual void SetUp() {
    CudaTest::SetUp();
    std::srand(time(0));
  }
  virtual void TearDown() {}

 protected:
  template <int RADIX_DIGITS>
  void CreateSample0(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = 0;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample1(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample2(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample3(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8;
    key[(rand() % (key.size() - 2))] += (rand() % RADIX_DIGITS) << 12;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 28;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample4(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8;
    key[(rand() % (key.size() - 2))] += (rand() % RADIX_DIGITS) << 12;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 28;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample5(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 8;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample6(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 28;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample7(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }

  template <int RADIX_DIGITS>
  void CreateSample8(std::vector<uint>& key, std::vector<uint>& val) {
    for (int i = 0; i < key.size(); ++i) key[i] = (rand() % RADIX_DIGITS) << 0;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 4;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 8;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 12;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 16;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 20;
    for (int i = 0; i < key.size(); ++i) key[i] += (rand() % RADIX_DIGITS) << 24;
    key[(rand() % (key.size() - 2))] += (rand() % RADIX_DIGITS) << 28;
    for (int i = 0; i < key.size(); ++i) val[i] = key[i];
  }
};

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue0) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 5;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample0<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue1) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 3;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 100;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample1<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue2) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 8;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 512 + 100;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample2<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue3) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 7;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 2*512 + 32;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample3<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue4) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 7;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 10*512 + 300;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample4<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue5) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 5;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 7*512 + 500;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample5<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue6) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 512 + 7;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample6<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue7) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 512 + 7;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample7<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

/***********************************************************************************
 *
 ***********************************************************************************/
TEST_F(RadixSortKeyValueTest, KeyValue8) {
  const int RADIX_BITS = 4;
  const int RADIX_DIGITS = 1 << RADIX_BITS;
  const int TILES = 10;
  const int numBlocks = 128;
  const int numElements = numBlocks * 512 * TILES + 512 + 7;
  std::vector<uint> keys(numElements);
  std::vector<uint> values(numElements);
  std::vector<uint> resultkeys(numElements);
  std::vector<uint> resultvalues(numElements);
  ASSERT_EQ(numElements, keys.size());
  ASSERT_EQ(numElements, values.size());
  ASSERT_EQ(numElements, resultkeys.size());
  ASSERT_EQ(numElements, resultvalues.size());
  CreateSample8<RADIX_DIGITS>(keys, values);
  uint* d_keys;
  checkCudaErrors(cudaMalloc((void**) &d_keys, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_keys, keys.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  uint* d_values;
  checkCudaErrors(cudaMalloc((void**) &d_values, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_values, values.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(keys.begin(), keys.end());
  // Initialize sort storage
  RadixsortStorage<uint, uint> storage(numElements);
  storage.InitDeviceStorage(d_keys, d_values);
  // Create sort enactor
  RadixsortEnactor<uint, uint> sorter(numElements);
  // Perform radix sort algorithm
  CudaDeviceTimer timer;
  timer.Start();
  sorter.Enact(storage);
  timer.Stop();
  // Copy sorted arrays back to host
  checkCudaErrors(cudaMemcpy(resultkeys.data(), d_keys, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(resultvalues.data(), d_values, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_keys));
  checkCudaErrors(cudaFree(d_values));
  // Compare with reference solution
  EXPECT_RANGE_EQ(keys, resultkeys);
  EXPECT_RANGE_EQ(keys, resultvalues);
  printf("Problem: %d\n", numElements);
  printf("Time: %.3f [ms]\n", timer.ElapsedTime());
}

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
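Editorial note: the tests above repeat one harness verbatim and vary only the sample generator and the problem size. A minimal sketch of how the key-only harness could be factored out, assuming the fixtures, macros, and sort classes above stay in scope (the helper name RunKeyTest is hypothetical; everything else mirrors the tests):

template <typename SampleFn>
void RunKeyTest(int numElements, SampleFn createSample) {
  std::vector<uint> data(numElements);
  std::vector<uint> result(numElements);
  createSample(data);
  // Push array of keys to device
  uint* d_data;
  checkCudaErrors(cudaMalloc((void**) &d_data, sizeof(uint) * numElements));
  checkCudaErrors(cudaMemcpy(d_data, data.data(), sizeof(uint) * numElements, cudaMemcpyHostToDevice));
  // Reference serial sort
  std::sort(data.begin(), data.end());
  // Device sort
  RadixsortStorage<uint> storage(numElements);
  storage.InitDeviceStorage(d_data);
  RadixsortEnactor<uint> sorter(numElements);
  sorter.Enact(storage);
  checkCudaErrors(cudaMemcpy(result.data(), d_data, sizeof(uint) * numElements, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaFree(d_data));
  ASSERT_RANGE_EQ(data, result);
}

Each TEST_F body would then shrink to computing numElements and calling, e.g., RunKeyTest(numElements, [this](std::vector<uint>& d) { CreateSample1<RADIX_DIGITS>(d); }); the key-value harness could be factored the same way.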
f48886f5bcea2caaf8633516a0dab594ee33af0e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int labels_per_instance_, Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    int label_count = 0;
    loss[index] = 0;
    counts[index] = 0;
    for (int k = 0; k < labels_per_instance_; k++) {
      const int label_value = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance_ + k]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      } else {
        ++label_count;
        loss[index] -= log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN)));
      }
    }
    if (label_count > 0) {
      loss[index] /= Dtype(label_count);
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL((SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_,
      has_ignore_label_, ignore_label_, labels_per_instance_, counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}

template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int labels_per_instance_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    int label_count = 0;
    for (int k = 0; k < labels_per_instance_; k++) {
      const int label_value = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance_ + k]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      } else {
        ++label_count;
      }
    }
    if (label_count == 0) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      counts[index] = 1;
      for (int k = 0; k < labels_per_instance_; k++) {
        const int label_value = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance_ + k]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          continue;
        } else {
          bottom_diff[n * dim + label_value * spatial_dim + s] -= Dtype(1) / Dtype(label_count);
        }
      }
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_,
        has_ignore_label_, ignore_label_, labels_per_instance_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
    caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);

}  // namespace caffe
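Editorial sketch (not part of the hipified file above): a scalar CPU reference for what SoftmaxLossForwardGPU computes at one (n, s) position, i.e. the mean negative log-likelihood over labels_per_instance_ labels with ignore_label_ skipped. Names mirror the kernel parameters; float stands in for Dtype; requires <cmath> and <cfloat>.

#include <cmath>
#include <cfloat>

float reference_softmax_loss(const float* prob, const float* label, int n, int s,
                             int dim, int spatial_dim, bool has_ignore_label,
                             int ignore_label, int labels_per_instance) {
  float loss = 0.0f;
  int label_count = 0;
  for (int k = 0; k < labels_per_instance; ++k) {
    const int y = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance + k]);
    if (has_ignore_label && y == ignore_label) continue;  // skipped labels do not count
    ++label_count;
    loss -= std::log(std::max(prob[n * dim + y * spatial_dim + s], FLT_MIN));
  }
  return label_count > 0 ? loss / label_count : 0.0f;  // average over valid labels
}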
f48886f5bcea2caaf8633516a0dab594ee33af0e.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int labels_per_instance_, Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    int label_count = 0;
    loss[index] = 0;
    counts[index] = 0;
    for (int k = 0; k < labels_per_instance_; k++) {
      const int label_value = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance_ + k]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      } else {
        ++label_count;
        loss[index] -= log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN)));
      }
    }
    if (label_count > 0) {
      loss[index] /= Dtype(label_count);
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_,
      has_ignore_label_, ignore_label_, labels_per_instance_, counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}

template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int labels_per_instance_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    int label_count = 0;
    for (int k = 0; k < labels_per_instance_; k++) {
      const int label_value = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance_ + k]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      } else {
        ++label_count;
      }
    }
    if (label_count == 0) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      counts[index] = 1;
      for (int k = 0; k < labels_per_instance_; k++) {
        const int label_value = static_cast<int>(label[(n * spatial_dim + s) * labels_per_instance_ + k]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          continue;
        } else {
          bottom_diff[n * dim + label_value * spatial_dim + s] -= Dtype(1) / Dtype(label_count);
        }
      }
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
        nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_,
        has_ignore_label_, ignore_label_, labels_per_instance_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
    caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);

}  // namespace caffe
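Editorial note on the backward pass above (a restatement of what the code does, not new behavior): bottom_diff is first seeded with the softmax probabilities p, and each valid label y_k then subtracts 1/label_count, so per channel c the kernel computes the multi-label cross-entropy gradient

    dL/dz_c = p_c - (1/K) * sum_{k=1..K} [c == y_k],    with K = label_count,

which reduces to the familiar p_c - [c == y] when labels_per_instance_ is 1. Positions whose labels are all ignored get a zero gradient and counts[index] = 0, so they are excluded from the VALID normalizer.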
f4459fe7a431e490ed754975b2c08dae03b40aa1.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>  // added: needed for the hip* runtime calls below
#include <cudnn.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <sys/time.h>

// Check cudnn
#define checkCUDNN(expression)                               \
  {                                                          \
    cudnnStatus_t status = (expression);                     \
    if (status != CUDNN_STATUS_SUCCESS) {                    \
      std::cerr << "Error on line " << __LINE__ << ": "      \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                               \
    }                                                        \
  }

// Get time function
double getTimeStamp() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (double)tv.tv_usec / 1000000 + tv.tv_sec;
}

// Load Image function
cv::Mat load_image(const char *image_path) {
  cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
  image.convertTo(image, CV_32FC3);
  cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
  return image;
}

// Save Image function
void save_image(const char *output_filename, float *buffer, int height, int width) {
  cv::Mat output_image(height, width, CV_32FC3, buffer);
  // Make negative values zero.
  cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO);
  cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
  output_image.convertTo(output_image, CV_8UC3);
  cv::imwrite(output_filename, output_image);
}

int main(int argc, char *argv[]) {
  // Init cudnn
  hipDeviceReset();
  cudnnHandle_t cudnn;
  cudnnCreate(&cudnn);
  char *outputfile = (char *)"cudnn_out_55.png";
  // Check input image name
  if (argc < 2) {
    std::cout << "No file input" << std::endl;
    return 0;
  }
  // Check if the filename is valid
  char *filename = argv[1];
  std::cout << argv[1] << std::endl;
  // Load Image
  cv::Mat image;
  image = load_image(filename);
  if (image.empty()) {
    std::cout << "File does not exist" << std::endl;
    return 0;
  }
  // Input Descriptor
  cudnnTensorDescriptor_t input_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor, /*format=*/CUDNN_TENSOR_NHWC,
      /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));
  cudnnTensorDescriptor_t output_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor, /*format=*/CUDNN_TENSOR_NHWC,
      /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));
  cudnnFilterDescriptor_t kernel_descriptor;
  checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
  checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor, /*dataType=*/CUDNN_DATA_FLOAT,
      /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/3, /*in_channels=*/3,
      /*kernel_height=*/5, /*kernel_width=*/5));
  cudnnConvolutionDescriptor_t convolution_descriptor;
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor, /*pad_height=*/2,
      /*pad_width=*/2, /*vertical_stride=*/1, /*horizontal_stride=*/1,
      /*dilation_height=*/1, /*dilation_width=*/1,
      /*mode=*/CUDNN_CROSS_CORRELATION, /*computeType=*/CUDNN_DATA_FLOAT));
  cudnnConvolutionFwdAlgo_t convolution_algorithm;
  checkCUDNN(cudnnGetConvolutionForwardAlgorithm(
      cudnn, input_descriptor, kernel_descriptor, convolution_descriptor,
      output_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
      /*memoryLimitInBytes=*/0, &convolution_algorithm));
  std::cout << "Convolution algorithm: " << convolution_algorithm << std::endl;
  size_t workspace_bytes = 0;
  convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
  //convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_DIRECT;
  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(
      cudnn, input_descriptor, kernel_descriptor, convolution_descriptor,
      output_descriptor, convolution_algorithm, &workspace_bytes));
  std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;
  void *d_workspace;
  hipMalloc(&d_workspace, workspace_bytes);
  std::cout << "allocate workspace" << std::endl;
  int batch_size;
  int channels;
  int height;
  int width;
  cudnnGetConvolution2dForwardOutputDim(
      convolution_descriptor, input_descriptor, kernel_descriptor,
      &batch_size, &channels, &height, &width);
  int image_bytes = batch_size * channels * height * width * sizeof(float);
  float *d_input;
  hipMalloc(&d_input, image_bytes);
  hipMemcpy(d_input, image.ptr<float>(0), image_bytes, hipMemcpyHostToDevice);
  float *d_output;
  hipMalloc(&d_output, image_bytes);
  hipMemset(d_output, 0, image_bytes);
  std::cout << "Height and width:" << height << " x " << width << std::endl;
  // Mystery kernel
  const float kernel_template[5][5] = {{1, 1, 1, 1, 1},
                                       {1, 4, 4, 4, 1},
                                       {1, 4, 12, 4, 1},
                                       {1, 4, 4, 4, 1},
                                       {1, 1, 1, 1, 1}};
  float h_kernel[3][3][5][5];
  for (int kernel = 0; kernel < 3; ++kernel) {
    for (int channel = 0; channel < 3; ++channel) {
      for (int row = 0; row < 5; ++row) {
        for (int column = 0; column < 5; ++column) {
          h_kernel[kernel][channel][row][column] = kernel_template[row][column];
        }
      }
    }
  }
  float *d_kernel;
  hipMalloc(&d_kernel, sizeof(h_kernel));
  hipMemcpy(d_kernel, h_kernel, sizeof(h_kernel), hipMemcpyHostToDevice);
  const float alpha = 1, beta = 0;
  std::cout << "Start conv" << std::endl;
  double timeStampA = getTimeStamp();
  checkCUDNN(cudnnConvolutionForward(
      cudnn, &alpha, input_descriptor, d_input, kernel_descriptor, d_kernel,
      convolution_descriptor, convolution_algorithm, d_workspace,
      workspace_bytes, &beta, output_descriptor, d_output));
  hipDeviceSynchronize();
  double timeStampB = getTimeStamp();
  // image_bytes counts bytes, not floats; the original allocated 4x too much.
  float *h_output = new float[image_bytes / sizeof(float)];
  hipMemcpy(h_output, d_output, image_bytes, hipMemcpyDeviceToHost);
  // Print result
  std::cout << "Total convolution time: " << timeStampB - timeStampA << std::endl;
  std::cout << "Save Output to " << outputfile << std::endl;
  save_image(outputfile, h_output, height, width);
  // Delete
  delete[] h_output;
  hipFree(d_kernel);
  hipFree(d_input);
  hipFree(d_output);
  hipFree(d_workspace);
  cudnnDestroyTensorDescriptor(input_descriptor);
  cudnnDestroyTensorDescriptor(output_descriptor);
  cudnnDestroyFilterDescriptor(kernel_descriptor);
  cudnnDestroyConvolutionDescriptor(convolution_descriptor);
  cudnnDestroy(cudnn);
}
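Editorial sketch (an assumption about intent, not in the original): the 5x5 template sums to 60 (16 ones, 8 fours, one 12), and each output channel accumulates over all 3 input channels, so the raw convolution scales intensities by roughly 180x; the result only looks right because save_image renormalizes. Dividing the weights by that gain keeps the output in the input's range; h_kernel_scaled is a hypothetical name:

const float kernel_gain = 3.0f * 60.0f;  // 3 input channels x template sum of 60
float h_kernel_scaled[3][3][5][5];
for (int k = 0; k < 3; ++k)
  for (int c = 0; c < 3; ++c)
    for (int r = 0; r < 5; ++r)
      for (int col = 0; col < 5; ++col)
        h_kernel_scaled[k][c][r][col] = kernel_template[r][col] / kernel_gain;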
f4459fe7a431e490ed754975b2c08dae03b40aa1.cu
#include <cudnn.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <sys/time.h>

// Check cudnn
#define checkCUDNN(expression)                               \
  {                                                          \
    cudnnStatus_t status = (expression);                     \
    if (status != CUDNN_STATUS_SUCCESS) {                    \
      std::cerr << "Error on line " << __LINE__ << ": "      \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                               \
    }                                                        \
  }

// Get time function
double getTimeStamp() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (double)tv.tv_usec / 1000000 + tv.tv_sec;
}

// Load Image function
cv::Mat load_image(const char *image_path) {
  cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
  image.convertTo(image, CV_32FC3);
  cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
  return image;
}

// Save Image function
void save_image(const char *output_filename, float *buffer, int height, int width) {
  cv::Mat output_image(height, width, CV_32FC3, buffer);
  // Make negative values zero.
  cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO);
  cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
  output_image.convertTo(output_image, CV_8UC3);
  cv::imwrite(output_filename, output_image);
}

int main(int argc, char *argv[]) {
  // Init cudnn
  cudaDeviceReset();
  cudnnHandle_t cudnn;
  cudnnCreate(&cudnn);
  char *outputfile = (char *)"cudnn_out_55.png";
  // Check input image name
  if (argc < 2) {
    std::cout << "No file input" << std::endl;
    return 0;
  }
  // Check if the filename is valid
  char *filename = argv[1];
  std::cout << argv[1] << std::endl;
  // Load Image
  cv::Mat image;
  image = load_image(filename);
  if (image.empty()) {
    std::cout << "File does not exist" << std::endl;
    return 0;
  }
  // Input Descriptor
  cudnnTensorDescriptor_t input_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor, /*format=*/CUDNN_TENSOR_NHWC,
      /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));
  cudnnTensorDescriptor_t output_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor, /*format=*/CUDNN_TENSOR_NHWC,
      /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));
  cudnnFilterDescriptor_t kernel_descriptor;
  checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
  checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor, /*dataType=*/CUDNN_DATA_FLOAT,
      /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/3, /*in_channels=*/3,
      /*kernel_height=*/5, /*kernel_width=*/5));
  cudnnConvolutionDescriptor_t convolution_descriptor;
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor, /*pad_height=*/2,
      /*pad_width=*/2, /*vertical_stride=*/1, /*horizontal_stride=*/1,
      /*dilation_height=*/1, /*dilation_width=*/1,
      /*mode=*/CUDNN_CROSS_CORRELATION, /*computeType=*/CUDNN_DATA_FLOAT));
  cudnnConvolutionFwdAlgo_t convolution_algorithm;
  checkCUDNN(cudnnGetConvolutionForwardAlgorithm(
      cudnn, input_descriptor, kernel_descriptor, convolution_descriptor,
      output_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
      /*memoryLimitInBytes=*/0, &convolution_algorithm));
  std::cout << "Convolution algorithm: " << convolution_algorithm << std::endl;
  size_t workspace_bytes = 0;
  convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
  //convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_DIRECT;
  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(
      cudnn, input_descriptor, kernel_descriptor, convolution_descriptor,
      output_descriptor, convolution_algorithm, &workspace_bytes));
  std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;
  void *d_workspace;
  cudaMalloc(&d_workspace, workspace_bytes);
  std::cout << "allocate workspace" << std::endl;
  int batch_size;
  int channels;
  int height;
  int width;
  cudnnGetConvolution2dForwardOutputDim(
      convolution_descriptor, input_descriptor, kernel_descriptor,
      &batch_size, &channels, &height, &width);
  int image_bytes = batch_size * channels * height * width * sizeof(float);
  float *d_input;
  cudaMalloc(&d_input, image_bytes);
  cudaMemcpy(d_input, image.ptr<float>(0), image_bytes, cudaMemcpyHostToDevice);
  float *d_output;
  cudaMalloc(&d_output, image_bytes);
  cudaMemset(d_output, 0, image_bytes);
  std::cout << "Height and width:" << height << " x " << width << std::endl;
  // Mystery kernel
  const float kernel_template[5][5] = {{1, 1, 1, 1, 1},
                                       {1, 4, 4, 4, 1},
                                       {1, 4, 12, 4, 1},
                                       {1, 4, 4, 4, 1},
                                       {1, 1, 1, 1, 1}};
  float h_kernel[3][3][5][5];
  for (int kernel = 0; kernel < 3; ++kernel) {
    for (int channel = 0; channel < 3; ++channel) {
      for (int row = 0; row < 5; ++row) {
        for (int column = 0; column < 5; ++column) {
          h_kernel[kernel][channel][row][column] = kernel_template[row][column];
        }
      }
    }
  }
  float *d_kernel;
  cudaMalloc(&d_kernel, sizeof(h_kernel));
  cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice);
  const float alpha = 1, beta = 0;
  std::cout << "Start conv" << std::endl;
  double timeStampA = getTimeStamp();
  checkCUDNN(cudnnConvolutionForward(
      cudnn, &alpha, input_descriptor, d_input, kernel_descriptor, d_kernel,
      convolution_descriptor, convolution_algorithm, d_workspace,
      workspace_bytes, &beta, output_descriptor, d_output));
  cudaDeviceSynchronize();
  double timeStampB = getTimeStamp();
  // image_bytes counts bytes, not floats; the original allocated 4x too much.
  float *h_output = new float[image_bytes / sizeof(float)];
  cudaMemcpy(h_output, d_output, image_bytes, cudaMemcpyDeviceToHost);
  // Print result
  std::cout << "Total convolution time: " << timeStampB - timeStampA << std::endl;
  std::cout << "Save Output to " << outputfile << std::endl;
  save_image(outputfile, h_output, height, width);
  // Delete
  delete[] h_output;
  cudaFree(d_kernel);
  cudaFree(d_input);
  cudaFree(d_output);
  cudaFree(d_workspace);
  cudnnDestroyTensorDescriptor(input_descriptor);
  cudnnDestroyTensorDescriptor(output_descriptor);
  cudnnDestroyFilterDescriptor(kernel_descriptor);
  cudnnDestroyConvolutionDescriptor(convolution_descriptor);
  cudnnDestroy(cudnn);
}
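Editorial note: cudnnGetConvolutionForwardAlgorithm was deprecated and removed in cuDNN 8, so the algorithm query above only compiles against cuDNN 7. A hedged sketch of the replacement, assuming cuDNN's cudnnFindConvolutionForwardAlgorithm (which benchmarks candidates against the same descriptors used above):

cudnnConvolutionFwdAlgoPerf_t perf_results[1];
int returned_algo_count = 0;
checkCUDNN(cudnnFindConvolutionForwardAlgorithm(
    cudnn, input_descriptor, kernel_descriptor, convolution_descriptor,
    output_descriptor, /*requestedAlgoCount=*/1, &returned_algo_count,
    perf_results));
cudnnConvolutionFwdAlgo_t best_algorithm = perf_results[0].algo;  // fastest found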
89a387de6aeeced38430be310f500626d9a0e482.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "check.h"

__global__ void border(uchar3* array, int w, int h) {
  int x = blockDim.x * blockIdx.x + threadIdx.x;
  int y = blockDim.y * blockIdx.y + threadIdx.y;
  if (x >= w || y >= h) {
    return;
  }
  if (x % 20 == 0 || x % 20 == 1 || y % 20 == 0 || y % 20 == 1) {
    array[y * w + x] = (uchar3) {255, 255, 255};
  }
}

void cuda_border(uchar3 *img, int w, int h) {
  uchar3 *cuda = NULL;
  checkErr(hipMalloc(&cuda, sizeof(uchar3) * w * h));
  checkErr(hipMemcpy(cuda, img, sizeof(uchar3) * w * h, hipMemcpyHostToDevice));
  int block = 20;
  // Round both grid dimensions up; the original h/block dropped the bottom
  // partial row of tiles whenever h was not a multiple of 20.
  dim3 blocks((w + block - 1) / block, (h + block - 1) / block);
  dim3 threads(block, block);
  hipLaunchKernelGGL((border), dim3(blocks), dim3(threads), 0, 0, cuda, w, h);
  checkErr(hipPeekAtLastError());
  checkErr(hipMemcpy(img, cuda, sizeof(uchar3) * w * h, hipMemcpyDeviceToHost));
  checkErr(hipFree(cuda));
}
89a387de6aeeced38430be310f500626d9a0e482.cu
#include <cuda_runtime.h>
#include "check.h"

__global__ void border(uchar3* array, int w, int h) {
  int x = blockDim.x * blockIdx.x + threadIdx.x;
  int y = blockDim.y * blockIdx.y + threadIdx.y;
  if (x >= w || y >= h) {
    return;
  }
  if (x % 20 == 0 || x % 20 == 1 || y % 20 == 0 || y % 20 == 1) {
    array[y * w + x] = (uchar3) {255, 255, 255};
  }
}

void cuda_border(uchar3 *img, int w, int h) {
  uchar3 *cuda = NULL;
  checkErr(cudaMalloc(&cuda, sizeof(uchar3) * w * h));
  checkErr(cudaMemcpy(cuda, img, sizeof(uchar3) * w * h, cudaMemcpyHostToDevice));
  int block = 20;
  // Round both grid dimensions up; the original h/block dropped the bottom
  // partial row of tiles whenever h was not a multiple of 20.
  dim3 blocks((w + block - 1) / block, (h + block - 1) / block);
  dim3 threads(block, block);
  border<<<blocks, threads>>>(cuda, w, h);
  checkErr(cudaPeekAtLastError());
  checkErr(cudaMemcpy(img, cuda, sizeof(uchar3) * w * h, cudaMemcpyDeviceToHost));
  checkErr(cudaFree(cuda));
}
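Editorial sketch (hypothetical helper, not in the original file): a CPU reference for the border kernel, useful for validating the GPU result pixel by pixel.

// Draws the same 20-pixel grid of 2-pixel-wide white lines on the host.
void cpu_border(uchar3 *img, int w, int h) {
  for (int y = 0; y < h; ++y)
    for (int x = 0; x < w; ++x)
      if (x % 20 <= 1 || y % 20 <= 1)  // same condition as the kernel
        img[y * w + x] = (uchar3){255, 255, 255};
}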
7fe25e3d86baced341c48a35a04a6e588686e9b5.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_

#include <stdio.h>
#include "backprop.h"
#include "hip/hip_runtime.h"
#include "math.h"

__global__ void bpnn_layerforward_CUDA(float* input_cuda, float* output_hidden_cuda, float* input_hidden_cuda, float* hidden_partial_sum, int in, int hid) {
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
  int index_in = HEIGHT * by + ty + 1;
  __shared__ float input_node[HEIGHT];
  __shared__ float weight_matrix[HEIGHT][WIDTH];
  if (tx == 0) input_node[ty] = input_cuda[index_in];
  __syncthreads();
  weight_matrix[ty][tx] = input_hidden_cuda[index];
  __syncthreads();
  weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
  __syncthreads();
  for (int i = 1; i <= __log2f(HEIGHT); i++) {
    int power_two = __powf(2, i);
    if (ty % power_two == 0)
      weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two / 2][tx];
    __syncthreads();
  }
  //__syncthreads();
  input_hidden_cuda[index] = weight_matrix[ty][tx];
  /*
  for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
    unsigned int power_two = i - 1;
    if( (ty & power_two) == 0 ) {
      weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
    }
  }
  */
  __syncthreads();
  if (tx == 0) {
    hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
  }
}

__global__ void bpnn_adjust_weights_cuda(float* delta, int hid, float* ly, int in, float* w, float* oldw) {
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
  int index_y = HEIGHT * by + ty + 1;
  int index_x = tx + 1;
  // eta = 0.3;
  // momentum = 0.3;
  w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
  oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
  __syncthreads();
  if (ty == 0 && by == 0) {
    w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
    oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
  }
}

#endif
7fe25e3d86baced341c48a35a04a6e588686e9b5.cu
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_

#include <stdio.h>
#include "backprop.h"
#include "cuda.h"
#include "math.h"

__global__ void bpnn_layerforward_CUDA(float* input_cuda, float* output_hidden_cuda, float* input_hidden_cuda, float* hidden_partial_sum, int in, int hid) {
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
  int index_in = HEIGHT * by + ty + 1;
  __shared__ float input_node[HEIGHT];
  __shared__ float weight_matrix[HEIGHT][WIDTH];
  if (tx == 0) input_node[ty] = input_cuda[index_in];
  __syncthreads();
  weight_matrix[ty][tx] = input_hidden_cuda[index];
  __syncthreads();
  weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
  __syncthreads();
  for (int i = 1; i <= __log2f(HEIGHT); i++) {
    int power_two = __powf(2, i);
    if (ty % power_two == 0)
      weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two / 2][tx];
    __syncthreads();
  }
  //__syncthreads();
  input_hidden_cuda[index] = weight_matrix[ty][tx];
  /*
  for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
    unsigned int power_two = i - 1;
    if( (ty & power_two) == 0 ) {
      weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
    }
  }
  */
  __syncthreads();
  if (tx == 0) {
    hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
  }
}

__global__ void bpnn_adjust_weights_cuda(float* delta, int hid, float* ly, int in, float* w, float* oldw) {
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
  int index_y = HEIGHT * by + ty + 1;
  int index_x = tx + 1;
  // eta = 0.3;
  // momentum = 0.3;
  w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
  oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
  __syncthreads();
  if (ty == 0 && by == 0) {
    w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
    oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
  }
}

#endif
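Editorial sketch (hypothetical kernel, not part of the Rodinia file): the tree reduction in bpnn_layerforward_CUDA computes its stride with __powf(2, i) and tests ty % power_two, which spends a float pow per iteration. The same column-wise reduction with integer shifts, assuming a single block of dim3(WIDTH, HEIGHT) threads and, as the % test in the original already presumes, HEIGHT a power of two:

__global__ void column_reduce_demo(float* data /* HEIGHT x WIDTH, row-major */) {
  int tx = threadIdx.x, ty = threadIdx.y;
  __shared__ float m[HEIGHT][WIDTH];
  m[ty][tx] = data[ty * WIDTH + tx];
  __syncthreads();
  for (int stride = 2; stride <= HEIGHT; stride <<= 1) {  // 2, 4, ..., HEIGHT
    if (ty % stride == 0)
      m[ty][tx] += m[ty + stride / 2][tx];
    __syncthreads();
  }
  if (ty == 0) data[tx] = m[0][tx];  // row 0 now holds the column sums
}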
8a20c7853fa9460a78101fb03270665bfc3c8f21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <unistd.h> #include <stdio.h> #include <string.h> #include <iostream> #include <omp.h> #define N 500000000 using namespace std; void warmUpGPU(); __global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C); int main(int argc, char *argv[]) { double start, comp_start, mem_start; start = omp_get_wtime(); warmUpGPU(); unsigned int * A; unsigned int * B; unsigned int * C; unsigned int * C_CPU; A=(unsigned int *)malloc(sizeof(unsigned int)*N); B=(unsigned int *)malloc(sizeof(unsigned int)*N); C=(unsigned int *)malloc(sizeof(unsigned int)*N); C_CPU=(unsigned int *)malloc(sizeof(unsigned int)*N); printf("\nSize of A+B+C (GiB): %f",(sizeof(unsigned int)*N*3.0)/(1024.0*1024.0*1024.0)); //init: int i=0; for (i=0; i<N; i++){ A[i]=i; B[i]=i; C[i]=0; C_CPU[i]=0; } //CPU version: /**for (int i=0; i<N; i++){ C_CPU[i]=A[i]+B[i]; }*/ //CUDA error code: hipError_t errCode=hipSuccess; unsigned int * dev_A; unsigned int * dev_B; unsigned int * dev_C; mem_start = omp_get_wtime(); //allocate on the device: A, B, C errCode=hipMalloc((unsigned int**)&dev_A, sizeof(unsigned int)*N); if(errCode != hipSuccess) { cout << "\nError: A error with code " << errCode << endl; } errCode=hipMalloc((unsigned int**)&dev_B, sizeof(unsigned int)*N); if(errCode != hipSuccess) { cout << "\nError: B error with code " << errCode << endl; } errCode=hipMalloc((unsigned int**)&dev_C, sizeof(unsigned int)*N); if(errCode != hipSuccess) { cout << "\nError: C error with code " << errCode << endl; } //copy A to device errCode=hipMemcpy( dev_A, A, sizeof(unsigned int)*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: A memcpy error with code " << errCode << endl; } //copy B to device errCode=hipMemcpy( dev_B, B, sizeof(unsigned int)*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: B memcpy error with code " << errCode << endl; } //copy C to device (initialized to 0) errCode=hipMemcpy( dev_C, C, sizeof(unsigned int)*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: C memcpy error with code " << errCode << endl; } printf("%s%lf\n", "\nTransfer time: ", omp_get_wtime() - mem_start); //execute kernel comp_start = omp_get_wtime(); const unsigned int totalBlocks=ceil(N*1.0/1024.0); printf("\ntotal blocks: %u",totalBlocks); hipLaunchKernelGGL(( vectorAdd), dim3(totalBlocks),dim3(1024), 0, 0, dev_A, dev_B, dev_C); errCode=hipGetLastError(); /* the launch is asynchronous; fetch its status explicitly */ hipDeviceSynchronize(); /* wait for the kernel so the Comp time below actually covers it */ printf("%s%lf\n", "\nComp time: ", omp_get_wtime() - comp_start); if(errCode != hipSuccess){ cout<<"Error after kernel launch "<<errCode<<endl; } //copy data from device to host errCode=hipMemcpy( C, dev_C, sizeof(unsigned int)*N, hipMemcpyDeviceToHost); if(errCode != hipSuccess) { cout << "\nError: getting C result from GPU error with code " << errCode << endl; } printf("%s%lf\n", "\nTotal time: ", omp_get_wtime() - start); return 0; } __global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C) { unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x); if (tid>=N) { return; } C[tid]=A[tid]+B[tid]; return; } void warmUpGPU(){ hipDeviceSynchronize(); return; }
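The transfer timing above measures pageable malloc buffers. Page-locked host memory usually raises host-device copy bandwidth for multi-GiB transfers like these; a sketch (not part of the file), noting that hipify maps cudaMallocHost/cudaFreeHost to hipHostMalloc/hipHostFree:

#include <cuda_runtime.h>
#include <cstddef>

// Sketch only: page-locked allocation for faster cudaMemcpy traffic.
unsigned int* allocPinned(size_t n)
{
    unsigned int* p = nullptr;
    if (cudaMallocHost((void**)&p, n * sizeof(unsigned int)) != cudaSuccess)
        return nullptr;  // caller can fall back to pageable malloc
    return p;
}
// Usage: A = allocPinned(N); ... cudaFreeHost(A);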
8a20c7853fa9460a78101fb03270665bfc3c8f21.cu
#include <stdlib.h> #include <unistd.h> #include <stdio.h> #include <string.h> #include <iostream> #include <omp.h> #define N 500000000 using namespace std; void warmUpGPU(); __global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C); int main(int argc, char *argv[]) { double start, comp_start, mem_start; start = omp_get_wtime(); warmUpGPU(); unsigned int * A; unsigned int * B; unsigned int * C; unsigned int * C_CPU; A=(unsigned int *)malloc(sizeof(unsigned int)*N); B=(unsigned int *)malloc(sizeof(unsigned int)*N); C=(unsigned int *)malloc(sizeof(unsigned int)*N); C_CPU=(unsigned int *)malloc(sizeof(unsigned int)*N); printf("\nSize of A+B+C (GiB): %f",(sizeof(unsigned int)*N*3.0)/(1024.0*1024.0*1024.0)); //init: int i=0; for (i=0; i<N; i++){ A[i]=i; B[i]=i; C[i]=0; C_CPU[i]=0; } //CPU version: /**for (int i=0; i<N; i++){ C_CPU[i]=A[i]+B[i]; }*/ //CUDA error code: cudaError_t errCode=cudaSuccess; unsigned int * dev_A; unsigned int * dev_B; unsigned int * dev_C; mem_start = omp_get_wtime(); //allocate on the device: A, B, C errCode=cudaMalloc((unsigned int**)&dev_A, sizeof(unsigned int)*N); if(errCode != cudaSuccess) { cout << "\nError: A error with code " << errCode << endl; } errCode=cudaMalloc((unsigned int**)&dev_B, sizeof(unsigned int)*N); if(errCode != cudaSuccess) { cout << "\nError: B error with code " << errCode << endl; } errCode=cudaMalloc((unsigned int**)&dev_C, sizeof(unsigned int)*N); if(errCode != cudaSuccess) { cout << "\nError: C error with code " << errCode << endl; } //copy A to device errCode=cudaMemcpy( dev_A, A, sizeof(unsigned int)*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: A memcpy error with code " << errCode << endl; } //copy B to device errCode=cudaMemcpy( dev_B, B, sizeof(unsigned int)*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: B memcpy error with code " << errCode << endl; } //copy C to device (initialized to 0) errCode=cudaMemcpy( dev_C, C, sizeof(unsigned int)*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: C memcpy error with code " << errCode << endl; } printf("%s%lf\n", "\nTransfer time: ", omp_get_wtime() - mem_start); //execute kernel comp_start = omp_get_wtime(); const unsigned int totalBlocks=ceil(N*1.0/1024.0); printf("\ntotal blocks: %u",totalBlocks); vectorAdd<<<totalBlocks,1024>>>(dev_A, dev_B, dev_C); errCode=cudaGetLastError(); /* the launch is asynchronous; fetch its status explicitly */ cudaDeviceSynchronize(); /* wait for the kernel so the Comp time below actually covers it */ printf("%s%lf\n", "\nComp time: ", omp_get_wtime() - comp_start); if(errCode != cudaSuccess){ cout<<"Error after kernel launch "<<errCode<<endl; } //copy data from device to host errCode=cudaMemcpy( C, dev_C, sizeof(unsigned int)*N, cudaMemcpyDeviceToHost); if(errCode != cudaSuccess) { cout << "\nError: getting C result from GPU error with code " << errCode << endl; } printf("%s%lf\n", "\nTotal time: ", omp_get_wtime() - start); return 0; } __global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C) { unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x); if (tid>=N) { return; } C[tid]=A[tid]+B[tid]; return; } void warmUpGPU(){ cudaDeviceSynchronize(); return; }
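Kernel launches are asynchronous: launch-configuration failures surface through cudaGetLastError(), execution failures through the status returned by cudaDeviceSynchronize(). A minimal checking helper in that spirit (CUDA_CHECK is our own name here, not an API used by the file above):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                            \
    do {                                                            \
        cudaError_t err_ = (call);                                  \
        if (err_ != cudaSuccess) {                                  \
            fprintf(stderr, "CUDA error %s at %s:%d\n",             \
                    cudaGetErrorString(err_), __FILE__, __LINE__);  \
            exit(EXIT_FAILURE);                                     \
        }                                                           \
    } while (0)

// After an asynchronous launch:
//     vectorAdd<<<totalBlocks, 1024>>>(dev_A, dev_B, dev_C);
//     CUDA_CHECK(cudaGetLastError());       // catches a bad launch configuration
//     CUDA_CHECK(cudaDeviceSynchronize());  // catches errors raised during execution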
f5b43247c612252bb83b236f45cb25bce989fd62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "update_part_props.cuh" #include "fill.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/gpu_data/partitions.h> namespace NKernel { template <int BlockSize> __forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) { float4 sum; sum.x = sum.y = sum.z = sum.w = 0; stat += offset; const int warpSize = 32; const int alignSize = 4 * warpSize; { int lastId = min(size, alignSize - (offset % alignSize)); if (blockIdx == 0) { if (threadIdx.x < lastId) { sum.x += Ldg(stat + threadIdx.x); } } size = max(size - lastId, 0); stat += lastId; } //now lets align end const int unalignedTail = (size % alignSize); if (unalignedTail != 0) { if (blockIdx == 0) { const int tailOffset = size - unalignedTail; if (threadIdx.x < unalignedTail) { sum.y += Ldg(stat + tailOffset + threadIdx.x); } } } size -= unalignedTail; const int entriesPerWarp = warpSize * 4; const int warpsPerBlock = (BlockSize / 32); const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32); stat += globalWarpId * entriesPerWarp; size = max(size - globalWarpId * entriesPerWarp, 0); const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount; const int localIdx = (threadIdx.x & 31) * 4; const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize; stat += localIdx; if (size > 0) { for (int i = 0; i < iterCount; ++i) { const float4* stat4 = (const float4*) stat; float4 val = Ldg(stat4); sum.x += val.x; sum.y += val.y; sum.z += val.z; sum.w += val.w; stat += stripeSize; } } return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w; }; template <class TOutput> __global__ void SaveResultsImpl(const ui32* partIds, const double* tempVars, ui32 partCount, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < partCount * statCount) { const ui32 leafId = partIds != nullptr ? 
partIds[y] : y; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += tempVars[i]; tempVars += statCount * partCount; } statSums[leafId * statCount + statId] = total; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets, const float* source, ui64 statLineSize, double* statPartSums) { const ui32 partOffset = __ldg(offsets + blockIdx.y); const ui32 partSize = __ldg(offsets + blockIdx.y + 1) - partOffset; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum <BlockSize> (source, partOffset, partSize, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { const int statCount = gridDim.z; const int partCount = gridDim.y; const int lineSize = statCount * partCount; ui64 idx = blockIdx.x * lineSize + blockIdx.y * statCount + statId; statPartSums[idx] = result; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsImpl(const ui32* partIds, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { const ui32 leafId = partIds[blockIdx.y]; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum < BlockSize > (source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } void UpdatePartitionsProps(const TDataPartition* parts, const ui32* partIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = partCount; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); hipLaunchKernelGGL(( UpdatePartitionsPropsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, partIds, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; hipLaunchKernelGGL(( SaveResultsImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, partIds, tempVars, partCount, statCount, numBlocks.x, statSums); } } void UpdatePartitionsPropsForSplit(const TDataPartition* parts, const ui32* leftPartIds, const ui32* rightPartIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream) { //TODO(noxoomo): if it'll be "slow", could be made in one kernel UpdatePartitionsProps(parts, 
leftPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream); UpdatePartitionsProps(parts, rightPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream); } void UpdatePartitionsPropsForOffsets(const ui32* offsets, ui32 count, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = count; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); hipLaunchKernelGGL(( UpdatePartitionsPropsForOffsetsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, offsets, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (count * statCount + saveBlockSize - 1) / saveBlockSize; hipLaunchKernelGGL(( SaveResultsImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, nullptr, tempVars, count, statCount, numBlocks.x, statSums); } } __global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { WriteThrough(dst + i, (double)__ldg(src + i)); } } void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) { const ui32 blockSize = 128; const ui32 numBlocks = CeilDivide(size, blockSize); if (numBlocks) { hipLaunchKernelGGL(( FloatToDoubleImpl), dim3(numBlocks), dim3(blockSize), 0, stream, src, size, dst); } } ui32 GetTempVarsCount(ui32 statCount, ui32 count) { return CeilDivide(2 * TArchProps::SMCount(), (int)statCount) * statCount * count; } }
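The main loop of ComputeSum above issues 16-byte float4 loads after hand-aligning the head and tail of the range. The core of that trick in isolation (a sketch; the caller must guarantee 16-byte alignment and a length that is a multiple of four):

__device__ double sum_aligned_float4(const float* stat, int quads)
{
    double acc = 0;
    const float4* stat4 = reinterpret_cast<const float4*>(stat);  // one 16-byte load per iteration
    for (int i = 0; i < quads; ++i) {
        float4 v = stat4[i];
        acc += (double)v.x + (double)v.y + (double)v.z + (double)v.w;
    }
    return acc;
}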
f5b43247c612252bb83b236f45cb25bce989fd62.cu
#include "update_part_props.cuh" #include "fill.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/gpu_data/partitions.h> namespace NKernel { template <int BlockSize> __forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) { float4 sum; sum.x = sum.y = sum.z = sum.w = 0; stat += offset; const int warpSize = 32; const int alignSize = 4 * warpSize; { int lastId = min(size, alignSize - (offset % alignSize)); if (blockIdx == 0) { if (threadIdx.x < lastId) { sum.x += Ldg(stat + threadIdx.x); } } size = max(size - lastId, 0); stat += lastId; } //now lets align end const int unalignedTail = (size % alignSize); if (unalignedTail != 0) { if (blockIdx == 0) { const int tailOffset = size - unalignedTail; if (threadIdx.x < unalignedTail) { sum.y += Ldg(stat + tailOffset + threadIdx.x); } } } size -= unalignedTail; const int entriesPerWarp = warpSize * 4; const int warpsPerBlock = (BlockSize / 32); const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32); stat += globalWarpId * entriesPerWarp; size = max(size - globalWarpId * entriesPerWarp, 0); const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount; const int localIdx = (threadIdx.x & 31) * 4; const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize; stat += localIdx; if (size > 0) { for (int i = 0; i < iterCount; ++i) { const float4* stat4 = (const float4*) stat; float4 val = Ldg(stat4); sum.x += val.x; sum.y += val.y; sum.z += val.z; sum.w += val.w; stat += stripeSize; } } return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w; }; template <class TOutput> __global__ void SaveResultsImpl(const ui32* partIds, const double* tempVars, ui32 partCount, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < partCount * statCount) { const ui32 leafId = partIds != nullptr ? 
partIds[y] : y; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += tempVars[i]; tempVars += statCount * partCount; } statSums[leafId * statCount + statId] = total; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets, const float* source, ui64 statLineSize, double* statPartSums) { const ui32 partOffset = __ldg(offsets + blockIdx.y); const ui32 partSize = __ldg(offsets + blockIdx.y + 1) - partOffset; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum <BlockSize> (source, partOffset, partSize, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { const int statCount = gridDim.z; const int partCount = gridDim.y; const int lineSize = statCount * partCount; ui64 idx = blockIdx.x * lineSize + blockIdx.y * statCount + statId; statPartSums[idx] = result; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsImpl(const ui32* partIds, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { const ui32 leafId = partIds[blockIdx.y]; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum < BlockSize > (source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } void UpdatePartitionsProps(const TDataPartition* parts, const ui32* partIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = partCount; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); UpdatePartitionsPropsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(partIds, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; SaveResultsImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(partIds, tempVars, partCount, statCount, numBlocks.x, statSums); } } void UpdatePartitionsPropsForSplit(const TDataPartition* parts, const ui32* leftPartIds, const ui32* rightPartIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream) { //TODO(noxoomo): if it'll be "slow", could be made in one kernel UpdatePartitionsProps(parts, leftPartIds, partCount, source, statCount, statLineSize, 
tempVarsCount, tempVars, statSums, stream); UpdatePartitionsProps(parts, rightPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream); } void UpdatePartitionsPropsForOffsets(const ui32* offsets, ui32 count, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = count; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); UpdatePartitionsPropsForOffsetsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(offsets, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (count * statCount + saveBlockSize - 1) / saveBlockSize; SaveResultsImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(nullptr, tempVars, count, statCount, numBlocks.x, statSums); } } __global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { WriteThrough(dst + i, (double)__ldg(src + i)); } } void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) { const ui32 blockSize = 128; const ui32 numBlocks = CeilDivide(size, blockSize); if (numBlocks) { FloatToDoubleImpl<<<numBlocks, blockSize, 0, stream>>>(src, size, dst); } } ui32 GetTempVarsCount(ui32 statCount, ui32 count) { return CeilDivide(2 * TArchProps::SMCount(), (int)statCount) * statCount * count; } }
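UpdatePartitionsPropsImpl plus SaveResultsImpl form a two-pass grid reduction: each block writes one partial sum into tempVars, and a second small kernel folds the per-block partials into the final statistic. The same shape stripped to a minimal sketch (PartialSum/FoldPartials are illustrative names, not CatBoost API):

template <int BlockSize>
__global__ void PartialSum(const float* src, int n, double* partials)
{
    __shared__ double buf[BlockSize];
    double acc = 0;
    // grid-stride loop: every block covers a strided slice of the input
    for (int i = blockIdx.x * BlockSize + threadIdx.x; i < n; i += gridDim.x * BlockSize)
        acc += src[i];
    buf[threadIdx.x] = acc;
    __syncthreads();
    for (int s = BlockSize / 2; s > 0; s >>= 1) {  // in-block tree reduction
        if (threadIdx.x < s) buf[threadIdx.x] += buf[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) partials[blockIdx.x] = buf[0];  // one value per block
}

__global__ void FoldPartials(const double* partials, int blockCount, double* out)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        double total = 0;
        for (int i = 0; i < blockCount; ++i) total += partials[i];
        *out = total;
    }
}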
47a52e861fda239b59cc9117c04181043cc9235e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <linalg/add.cuh> #include <linalg/subtract.cuh> #include <linalg/unary_op.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T, typename IdxType = int> struct DevScalarInputs { T tolerance; IdxType len; T scalar; bool add; unsigned long long int seed; }; // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename T, typename IdxType = int> void unaryOpLaunch(T *out, const T *in, T scalar, IdxType len, bool add, hipStream_t stream) { unaryOp( out, in, len, [scalar, add] __device__(T in) { return add ? in + scalar : in - scalar; }, stream); } template <typename T, typename IdxType> class DevScalarTest : public ::testing::TestWithParam<DevScalarInputs<T, IdxType>> { protected: void SetUp() override { params = ::testing::TestWithParam<DevScalarInputs<T, IdxType>>::GetParam(); Random::Rng r(params.seed); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); auto len = params.len; allocate(in, len); allocate(out_ref, len); allocate(out, len); allocate(scalar, (size_t)1); updateDevice(scalar, &params.scalar, 1, stream); r.uniform(in, len, T(-1.0), T(1.0), stream); unaryOpLaunch(out_ref, in, params.scalar, len, params.add, stream); if (params.add) { addDevScalar(out, in, scalar, len, stream); } else { subtractDevScalar(out, in, scalar, len, stream); } CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); CUDA_CHECK(hipFree(scalar)); } protected: DevScalarInputs<T, IdxType> params; T *in, *out_ref, *out, *scalar; }; const std::vector<DevScalarInputs<float, int>> inputsf_i32 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, int> DevScalarTestF_i32; TEST_P(DevScalarTestF_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i32, ::testing::ValuesIn(inputsf_i32)); const std::vector<DevScalarInputs<float, size_t>> inputsf_i64 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, size_t> DevScalarTestF_i64; TEST_P(DevScalarTestF_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i64, ::testing::ValuesIn(inputsf_i64)); const std::vector<DevScalarInputs<double, int>> inputsd_i32 = { {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, int> DevScalarTestD_i32; TEST_P(DevScalarTestD_i32, Result) { 
ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i32, ::testing::ValuesIn(inputsd_i32)); const std::vector<DevScalarInputs<double, size_t>> inputsd_i64 = { {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, size_t> DevScalarTestD_i64; TEST_P(DevScalarTestD_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i64, ::testing::ValuesIn(inputsd_i64)); } // end namespace LinAlg } // end namespace MLCommon
47a52e861fda239b59cc9117c04181043cc9235e.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <linalg/add.cuh> #include <linalg/subtract.cuh> #include <linalg/unary_op.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T, typename IdxType = int> struct DevScalarInputs { T tolerance; IdxType len; T scalar; bool add; unsigned long long int seed; }; // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename T, typename IdxType = int> void unaryOpLaunch(T *out, const T *in, T scalar, IdxType len, bool add, cudaStream_t stream) { unaryOp( out, in, len, [scalar, add] __device__(T in) { return add ? in + scalar : in - scalar; }, stream); } template <typename T, typename IdxType> class DevScalarTest : public ::testing::TestWithParam<DevScalarInputs<T, IdxType>> { protected: void SetUp() override { params = ::testing::TestWithParam<DevScalarInputs<T, IdxType>>::GetParam(); Random::Rng r(params.seed); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); auto len = params.len; allocate(in, len); allocate(out_ref, len); allocate(out, len); allocate(scalar, (size_t)1); updateDevice(scalar, &params.scalar, 1, stream); r.uniform(in, len, T(-1.0), T(1.0), stream); unaryOpLaunch(out_ref, in, params.scalar, len, params.add, stream); if (params.add) { addDevScalar(out, in, scalar, len, stream); } else { subtractDevScalar(out, in, scalar, len, stream); } CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); CUDA_CHECK(cudaFree(scalar)); } protected: DevScalarInputs<T, IdxType> params; T *in, *out_ref, *out, *scalar; }; const std::vector<DevScalarInputs<float, int>> inputsf_i32 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, int> DevScalarTestF_i32; TEST_P(DevScalarTestF_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i32, ::testing::ValuesIn(inputsf_i32)); const std::vector<DevScalarInputs<float, size_t>> inputsf_i64 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, size_t> DevScalarTestF_i64; TEST_P(DevScalarTestF_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i64, ::testing::ValuesIn(inputsf_i64)); const std::vector<DevScalarInputs<double, int>> inputsd_i32 = { {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, int> DevScalarTestD_i32; TEST_P(DevScalarTestD_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, 
CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i32, ::testing::ValuesIn(inputsd_i32)); const std::vector<DevScalarInputs<double, size_t>> inputsd_i64 = { {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, size_t> DevScalarTestD_i64; TEST_P(DevScalarTestD_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i64, ::testing::ValuesIn(inputsd_i64)); } // end namespace LinAlg } // end namespace MLCommon
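The test above exercises addDevScalar/subtractDevScalar, whose distinguishing feature is that the scalar stays resident in device memory, so no device-to-host copy precedes the call. What such a kernel looks like (our illustration, not the cuML implementation):

template <typename T, typename IdxType>
__global__ void addDevScalarSketch(T* out, const T* in, const T* scalar, IdxType len)
{
    IdxType i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len) out[i] = in[i] + *scalar;  // the scalar is dereferenced on the device
}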
c1bac6e9f2e55fe06bddc95b2be42586df4c3f4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" namespace cv { namespace gpu { namespace device { namespace stereobm { ////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////// Stereo BM //////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// #define ROWSperTHREAD 21 // the number of rows a thread will process #define BLOCK_W 128 // the thread block width (464) #define N_DISPARITIES 8 #define STEREO_MIND 0 // The minimum d range to check #define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing __constant__ unsigned int* cminSSDImage; __constant__ size_t cminSSD_step; __constant__ int cwidth; __constant__ int cheight; __device__ __forceinline__ int SQ(int a) { return a * a; } template<int RADIUS> __device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd) { unsigned int cache = 0; unsigned int cache2 = 0; for(int i = 1; i <= RADIUS; i++) cache += col_ssd[i]; col_ssd_cache[0] = cache; __syncthreads(); if (threadIdx.x < BLOCK_W - RADIUS) cache2 = col_ssd_cache[RADIUS]; else for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++) cache2 += col_ssd[i]; return col_ssd[0] + cache + cache2; } template<int RADIUS> __device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd) { unsigned int ssd[N_DISPARITIES]; //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS)); int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7]))); int bestIdx = 0; for (int i = 0; i < N_DISPARITIES; i++) { if (mssd == ssd[i]) bestIdx = i; } return make_uint2(mssd, bestIdx); } template<int RADIUS> __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd) { unsigned char leftPixel1; unsigned char leftPixel2; unsigned char rightPixel1[8]; unsigned char rightPixel2[8]; unsigned int diff1, diff2; leftPixel1 = imageL[idx1]; leftPixel2 = imageL[idx2]; idx1 = idx1 - d; idx2 = idx2 - d; rightPixel1[7] = imageR[idx1 - 7]; rightPixel1[0] = imageR[idx1 - 0]; rightPixel1[1] = imageR[idx1 - 1]; rightPixel1[2] = imageR[idx1 - 2]; rightPixel1[3] = imageR[idx1 - 3]; rightPixel1[4] = imageR[idx1 - 4]; rightPixel1[5] = imageR[idx1 - 5]; rightPixel1[6] = imageR[idx1 - 6]; rightPixel2[7] = imageR[idx2 - 7]; rightPixel2[0] = imageR[idx2 - 0]; rightPixel2[1] = imageR[idx2 - 1]; rightPixel2[2] = imageR[idx2 - 2]; rightPixel2[3] = imageR[idx2 - 3]; rightPixel2[4] = imageR[idx2 - 4]; rightPixel2[5] = imageR[idx2 - 5]; rightPixel2[6] = imageR[idx2 - 6]; //See above: #define COL_SSD_SIZE 
(BLOCK_W + 2 * RADIUS) diff1 = leftPixel1 - rightPixel1[0]; diff2 = leftPixel2 - rightPixel2[0]; col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[1]; diff2 = leftPixel2 - rightPixel2[1]; col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[2]; diff2 = leftPixel2 - rightPixel2[2]; col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[3]; diff2 = leftPixel2 - rightPixel2[3]; col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[4]; diff2 = leftPixel2 - rightPixel2[4]; col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[5]; diff2 = leftPixel2 - rightPixel2[5]; col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[6]; diff2 = leftPixel2 - rightPixel2[6]; col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[7]; diff2 = leftPixel2 - rightPixel2[7]; col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); } template<int RADIUS> __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd) { unsigned char leftPixel1; int idx; unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0}; for(int i = 0; i < (2 * RADIUS + 1); i++) { idx = y_tex * im_pitch + x_tex; leftPixel1 = imageL[idx]; idx = idx - d; diffa[0] += SQ(leftPixel1 - imageR[idx - 0]); diffa[1] += SQ(leftPixel1 - imageR[idx - 1]); diffa[2] += SQ(leftPixel1 - imageR[idx - 2]); diffa[3] += SQ(leftPixel1 - imageR[idx - 3]); diffa[4] += SQ(leftPixel1 - imageR[idx - 4]); diffa[5] += SQ(leftPixel1 - imageR[idx - 5]); diffa[6] += SQ(leftPixel1 - imageR[idx - 6]); diffa[7] += SQ(leftPixel1 - imageR[idx - 7]); y_tex += 1; } //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0]; col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1]; col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2]; col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3]; col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4]; col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5]; col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6]; col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7]; } template<int RADIUS> __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp) { extern __shared__ unsigned int col_ssd_cache[]; volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x; volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? 
col_ssd + BLOCK_W : 0; //#define N_DIRTY_PIXELS (2 * RADIUS) //#define X (blockIdx.x * BLOCK_W + threadIdx.x + STEREO_MAXD) int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS); //#define Y (__mul24(blockIdx.y, ROWSperTHREAD) + RADIUS) #define Y (blockIdx.y * ROWSperTHREAD + RADIUS) //int Y = blockIdx.y * ROWSperTHREAD + RADIUS; unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step; unsigned char* disparImage = disp.data + X + Y * disp.step; /* if (X < cwidth) { unsigned int *minSSDImage_end = minSSDImage + min(ROWSperTHREAD, cheight - Y) * minssd_step; for(uint *ptr = minSSDImage; ptr != minSSDImage_end; ptr += minssd_step ) *ptr = 0xFFFFFFFF; }*/ int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS); int y_tex; int x_tex = X - RADIUS; if (x_tex >= cwidth) return; for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP) { y_tex = Y - RADIUS; InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd); if (col_ssd_extra > 0) if (x_tex + BLOCK_W < cwidth) InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra); __syncthreads(); //before MinSSD function if (X < cwidth - RADIUS && Y < cheight - RADIUS) { uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd); if (minSSD.x < minSSDImage[0]) { disparImage[0] = (unsigned char)(d + minSSD.y); minSSDImage[0] = minSSD.x; } } for(int row = 1; row < end_row; row++) { int idx1 = y_tex * img_step + x_tex; int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex; __syncthreads(); StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd); if (col_ssd_extra) if (x_tex + BLOCK_W < cwidth) StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra); y_tex += 1; __syncthreads(); //before MinSSD function if (X < cwidth - RADIUS && row < cheight - RADIUS - Y) { int idx = row * cminSSD_step; uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd); if (minSSD.x < minSSDImage[idx]) { disparImage[disp.step * row] = (unsigned char)(d + minSSD.y); minSSDImage[idx] = minSSD.x; } } } // for row loop } // for d loop } template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, hipStream_t & stream) { dim3 grid(1,1,1); dim3 threads(BLOCK_W, 1, 1); grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W); grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD); //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int); hipLaunchKernelGGL(( stereoKernel<RADIUS>), dim3(grid), dim3(threads), smem_size, stream, left.data, right.data, left.step, disp, maxdisp); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); }; typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, hipStream_t & stream); const static kernel_caller_t callers[] = { 0, kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>, kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>, kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>, kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>, kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25> //0,0,0, 0,0,0, 0,0,kernel_caller<9> }; const int calles_num = sizeof(callers)/sizeof(callers[0]); void
stereoBM_GPU(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, hipStream_t& stream) { int winsz2 = winsz >> 1; if (winsz2 == 0 || winsz2 >= calles_num) cv::gpu::error("Unsupported window size", __FILE__, __LINE__, "stereoBM_GPU"); //cudaSafeCall( hipFuncSetCacheConfig(&stereoKernel, hipFuncCachePreferL1) ); //cudaSafeCall( hipFuncSetCacheConfig(&stereoKernel, hipFuncCachePreferShared) ); cudaSafeCall( hipMemset2D(disp.data, disp.step, 0, disp.cols, disp.rows) ); cudaSafeCall( hipMemset2D(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows) ); cudaSafeCall( hipMemcpyToSymbol( cwidth, &left.cols, sizeof(left.cols) ) ); cudaSafeCall( hipMemcpyToSymbol( cheight, &left.rows, sizeof(left.rows) ) ); cudaSafeCall( hipMemcpyToSymbol( cminSSDImage, &minSSD_buf.data, sizeof(minSSD_buf.data) ) ); size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize(); cudaSafeCall( hipMemcpyToSymbol( cminSSD_step, &minssd_step, sizeof(minssd_step) ) ); callers[winsz2](left, right, disp, maxdisp, stream); } ////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////// Sobel Prefilter ////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// texture<unsigned char, 2, hipReadModeElementType> texForSobel; __global__ void prefilter_kernel(PtrStepSzb output, int prefilterCap) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < output.cols && y < output.rows) { int conv = (int)tex2D(texForSobel, x - 1, y - 1) * (-1) + (int)tex2D(texForSobel, x + 1, y - 1) * (1) + (int)tex2D(texForSobel, x - 1, y ) * (-2) + (int)tex2D(texForSobel, x + 1, y ) * (2) + (int)tex2D(texForSobel, x - 1, y + 1) * (-1) + (int)tex2D(texForSobel, x + 1, y + 1) * (1); conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255); output.ptr(y)[x] = conv & 0xFF; } } void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, hipStream_t & stream) { hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>(); cudaSafeCall( hipBindTexture2D( 0, texForSobel, input.data, desc, input.cols, input.rows, input.step ) ); dim3 threads(16, 16, 1); dim3 grid(1, 1, 1); grid.x = divUp(input.cols, threads.x); grid.y = divUp(input.rows, threads.y); hipLaunchKernelGGL(( prefilter_kernel), dim3(grid), dim3(threads), 0, stream, output, prefilterCap); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); cudaSafeCall( hipUnbindTexture (texForSobel ) ); } ////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Textureness filtering //////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// texture<unsigned char, 2, hipReadModeNormalizedFloat> texForTF; __device__ __forceinline__ float sobel(int x, int y) { float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) + tex2D(texForTF, x - 1, y ) * (-2) + tex2D(texForTF, x + 1, y ) * (2) + tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1); return fabs(conv); } __device__ float CalcSums(float *cols, float *cols_cache, int winsz) { float cache = 0; float cache2 = 0; int winsz2 = winsz/2;
for(int i = 1; i <= winsz2; i++) cache += cols[i]; cols_cache[0] = cache; __syncthreads(); if (threadIdx.x < blockDim.x - winsz2) cache2 = cols_cache[winsz2]; else for(int i = winsz2 + 1; i < winsz; i++) cache2 += cols[i]; return cols[0] + cache + cache2; } #define RpT (2 * ROWSperTHREAD) // got experimentally __global__ void textureness_kernel(PtrStepSzb disp, int winsz, float threshold) { int winsz2 = winsz/2; int n_dirty_pixels = (winsz2) * 2; extern __shared__ float cols_cache[]; float *cols = cols_cache + blockDim.x + threadIdx.x; float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0; int x = blockIdx.x * blockDim.x + threadIdx.x; int beg_row = blockIdx.y * RpT; int end_row = ::min(beg_row + RpT, disp.rows); if (x < disp.cols) { int y = beg_row; float sum = 0; float sum_extra = 0; for(int i = y - winsz2; i <= y + winsz2; ++i) { sum += sobel(x - winsz2, i); if (cols_extra) sum_extra += sobel(x + blockDim.x - winsz2, i); } *cols = sum; if (cols_extra) *cols_extra = sum_extra; __syncthreads(); float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255; if (sum_win < threshold) disp.data[y * disp.step + x] = 0; __syncthreads(); for(int y = beg_row + 1; y < end_row; ++y) { sum = sum - sobel(x - winsz2, y - winsz2 - 1) + sobel(x - winsz2, y + winsz2); *cols = sum; if (cols_extra) { sum_extra = sum_extra - sobel(x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(x + blockDim.x - winsz2, y + winsz2); *cols_extra = sum_extra; } __syncthreads(); float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255; if (sum_win < threshold) disp.data[y * disp.step + x] = 0; __syncthreads(); } } } void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, hipStream_t & stream) { avgTexturenessThreshold *= winsz * winsz; texForTF.filterMode = hipFilterModeLinear; texForTF.addressMode[0] = hipAddressModeWrap; texForTF.addressMode[1] = hipAddressModeWrap; hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>(); cudaSafeCall( hipBindTexture2D( 0, texForTF, input.data, desc, input.cols, input.rows, input.step ) ); dim3 threads(128, 1, 1); dim3 grid(1, 1, 1); grid.x = divUp(input.cols, threads.x); grid.y = divUp(input.rows, RpT); size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float); hipLaunchKernelGGL(( textureness_kernel), dim3(grid), dim3(threads), smem_size, stream, disp, winsz, avgTexturenessThreshold); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); cudaSafeCall( hipUnbindTexture (texForTF) ); } } // namespace stereobm }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
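prefilter_kernel above compresses the X-Sobel response into a byte by clamping it to [-prefilterCap, prefilterCap] and shifting into [0, 2*prefilterCap], capped at 255. That mapping factored out as a sketch:

__device__ unsigned char clamp_prefilter(int conv, int prefilterCap)
{
    // clamp to [-cap, cap], shift to [0, 2*cap], saturate at 255
    conv = ::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap;
    return (unsigned char)::min(conv, 255);
}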
c1bac6e9f2e55fe06bddc95b2be42586df4c3f4c.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" namespace cv { namespace gpu { namespace device { namespace stereobm { ////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////// Stereo BM //////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// #define ROWSperTHREAD 21 // the number of rows a thread will process #define BLOCK_W 128 // the thread block width (464) #define N_DISPARITIES 8 #define STEREO_MIND 0 // The minimum d range to check #define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing __constant__ unsigned int* cminSSDImage; __constant__ size_t cminSSD_step; __constant__ int cwidth; __constant__ int cheight; __device__ __forceinline__ int SQ(int a) { return a * a; } template<int RADIUS> __device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd) { unsigned int cache = 0; unsigned int cache2 = 0; for(int i = 1; i <= RADIUS; i++) cache += col_ssd[i]; col_ssd_cache[0] = cache; __syncthreads(); if (threadIdx.x < BLOCK_W - RADIUS) cache2 = col_ssd_cache[RADIUS]; else for(int i = RADIUS + 1; i < (2 * RADIUS + 1); i++) cache2 += col_ssd[i]; return col_ssd[0] + cache + cache2; } template<int RADIUS> __device__ uint2 MinSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd) { unsigned int ssd[N_DISPARITIES]; //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[3] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 3 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[4] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 4 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[5] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 5 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[6] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 6 * (BLOCK_W + 2 * RADIUS)); __syncthreads(); ssd[7] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 7 * (BLOCK_W + 2 * RADIUS)); int mssd = ::min(::min(::min(ssd[0], ssd[1]), ::min(ssd[4], ssd[5])), ::min(::min(ssd[2], ssd[3]), ::min(ssd[6], ssd[7]))); int bestIdx = 0; for (int i = 0; i < N_DISPARITIES; i++) { if (mssd == ssd[i]) bestIdx = i; } return make_uint2(mssd, bestIdx); } template<int RADIUS> __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd) { unsigned char leftPixel1; unsigned char leftPixel2; unsigned char rightPixel1[8]; unsigned char rightPixel2[8]; unsigned int diff1, diff2; leftPixel1 = imageL[idx1]; leftPixel2 = imageL[idx2]; idx1 = idx1 - d; idx2 = idx2 - d; rightPixel1[7] = imageR[idx1 - 7]; rightPixel1[0] = imageR[idx1 - 0]; rightPixel1[1] = imageR[idx1 - 1]; rightPixel1[2] = imageR[idx1 - 2]; rightPixel1[3] = imageR[idx1 - 3]; rightPixel1[4] = imageR[idx1 - 4]; rightPixel1[5] = imageR[idx1 - 5]; rightPixel1[6] = imageR[idx1 - 6]; rightPixel2[7] = imageR[idx2 - 7]; rightPixel2[0] = imageR[idx2 - 0]; rightPixel2[1] = imageR[idx2 - 1]; rightPixel2[2] = imageR[idx2 - 2]; rightPixel2[3] = imageR[idx2 - 3]; rightPixel2[4] = imageR[idx2 - 4]; rightPixel2[5] = imageR[idx2 - 5]; rightPixel2[6] = imageR[idx2 - 6]; //See above: #define COL_SSD_SIZE 
(BLOCK_W + 2 * RADIUS) diff1 = leftPixel1 - rightPixel1[0]; diff2 = leftPixel2 - rightPixel2[0]; col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[1]; diff2 = leftPixel2 - rightPixel2[1]; col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[2]; diff2 = leftPixel2 - rightPixel2[2]; col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[3]; diff2 = leftPixel2 - rightPixel2[3]; col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[4]; diff2 = leftPixel2 - rightPixel2[4]; col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[5]; diff2 = leftPixel2 - rightPixel2[5]; col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[6]; diff2 = leftPixel2 - rightPixel2[6]; col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); diff1 = leftPixel1 - rightPixel1[7]; diff2 = leftPixel2 - rightPixel2[7]; col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); } template<int RADIUS> __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* imageL, unsigned char* imageR, int d, volatile unsigned int *col_ssd) { unsigned char leftPixel1; int idx; unsigned int diffa[] = {0, 0, 0, 0, 0, 0, 0, 0}; for(int i = 0; i < (2 * RADIUS + 1); i++) { idx = y_tex * im_pitch + x_tex; leftPixel1 = imageL[idx]; idx = idx - d; diffa[0] += SQ(leftPixel1 - imageR[idx - 0]); diffa[1] += SQ(leftPixel1 - imageR[idx - 1]); diffa[2] += SQ(leftPixel1 - imageR[idx - 2]); diffa[3] += SQ(leftPixel1 - imageR[idx - 3]); diffa[4] += SQ(leftPixel1 - imageR[idx - 4]); diffa[5] += SQ(leftPixel1 - imageR[idx - 5]); diffa[6] += SQ(leftPixel1 - imageR[idx - 6]); diffa[7] += SQ(leftPixel1 - imageR[idx - 7]); y_tex += 1; } //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0]; col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1]; col_ssd[2 * (BLOCK_W + 2 * RADIUS)] = diffa[2]; col_ssd[3 * (BLOCK_W + 2 * RADIUS)] = diffa[3]; col_ssd[4 * (BLOCK_W + 2 * RADIUS)] = diffa[4]; col_ssd[5 * (BLOCK_W + 2 * RADIUS)] = diffa[5]; col_ssd[6 * (BLOCK_W + 2 * RADIUS)] = diffa[6]; col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7]; } template<int RADIUS> __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, PtrStepb disp, int maxdisp) { extern __shared__ unsigned int col_ssd_cache[]; volatile unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x; volatile unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? 
col_ssd + BLOCK_W : 0; //#define N_DIRTY_PIXELS (2 * RADIUS) //#define X (blockIdx.x * BLOCK_W + threadIdx.x + STEREO_MAXD) int X = (blockIdx.x * BLOCK_W + threadIdx.x + maxdisp + RADIUS); //#define Y (__mul24(blockIdx.y, ROWSperTHREAD) + RADIUS) #define Y (blockIdx.y * ROWSperTHREAD + RADIUS) //int Y = blockIdx.y * ROWSperTHREAD + RADIUS; unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step; unsigned char* disparImage = disp.data + X + Y * disp.step; /* if (X < cwidth) { unsigned int *minSSDImage_end = minSSDImage + min(ROWSperTHREAD, cheight - Y) * minssd_step; for(uint *ptr = minSSDImage; ptr != minSSDImage_end; ptr += minssd_step ) *ptr = 0xFFFFFFFF; }*/ int end_row = ::min(ROWSperTHREAD, cheight - Y - RADIUS); int y_tex; int x_tex = X - RADIUS; if (x_tex >= cwidth) return; for(int d = STEREO_MIND; d < maxdisp; d += STEREO_DISP_STEP) { y_tex = Y - RADIUS; InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd); if (col_ssd_extra > 0) if (x_tex + BLOCK_W < cwidth) InitColSSD<RADIUS>(x_tex + BLOCK_W, y_tex, img_step, left, right, d, col_ssd_extra); __syncthreads(); //before MinSSD function if (X < cwidth - RADIUS && Y < cheight - RADIUS) { uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd); if (minSSD.x < minSSDImage[0]) { disparImage[0] = (unsigned char)(d + minSSD.y); minSSDImage[0] = minSSD.x; } } for(int row = 1; row < end_row; row++) { int idx1 = y_tex * img_step + x_tex; int idx2 = (y_tex + (2 * RADIUS + 1)) * img_step + x_tex; __syncthreads(); StepDown<RADIUS>(idx1, idx2, left, right, d, col_ssd); if (col_ssd_extra) if (x_tex + BLOCK_W < cwidth) StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra); y_tex += 1; __syncthreads(); //before MinSSD function if (X < cwidth - RADIUS && row < cheight - RADIUS - Y) { int idx = row * cminSSD_step; uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd); if (minSSD.x < minSSDImage[idx]) { disparImage[disp.step * row] = (unsigned char)(d + minSSD.y); minSSDImage[idx] = minSSD.x; } } } // for row loop } // for d loop } template<int RADIUS> void kernel_caller(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, cudaStream_t & stream) { dim3 grid(1,1,1); dim3 threads(BLOCK_W, 1, 1); grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W); grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD); //See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int); stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(left.data, right.data, left.step, disp, maxdisp); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); }; typedef void (*kernel_caller_t)(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, cudaStream_t & stream); const static kernel_caller_t callers[] = { 0, kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>, kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>, kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>, kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>, kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25> //0,0,0, 0,0,0, 0,0,kernel_caller<9> }; const int calles_num = sizeof(callers)/sizeof(callers[0]); void stereoBM_GPU(const PtrStepSzb& left,
const PtrStepSzb& right, const PtrStepSzb& disp, int maxdisp, int winsz, const PtrStepSz<unsigned int>& minSSD_buf, cudaStream_t& stream) { int winsz2 = winsz >> 1; if (winsz2 == 0 || winsz2 >= calles_num) cv::gpu::error("Unsupported window size", __FILE__, __LINE__, "stereoBM_GPU"); //cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferL1) ); //cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferShared) ); cudaSafeCall( cudaMemset2D(disp.data, disp.step, 0, disp.cols, disp.rows) ); cudaSafeCall( cudaMemset2D(minSSD_buf.data, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows) ); cudaSafeCall( cudaMemcpyToSymbol( cwidth, &left.cols, sizeof(left.cols) ) ); cudaSafeCall( cudaMemcpyToSymbol( cheight, &left.rows, sizeof(left.rows) ) ); cudaSafeCall( cudaMemcpyToSymbol( cminSSDImage, &minSSD_buf.data, sizeof(minSSD_buf.data) ) ); size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize(); cudaSafeCall( cudaMemcpyToSymbol( cminSSD_step, &minssd_step, sizeof(minssd_step) ) ); callers[winsz2](left, right, disp, maxdisp, stream); } ////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////// Sobel Prefiler /////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// texture<unsigned char, 2, cudaReadModeElementType> texForSobel; __global__ void prefilter_kernel(PtrStepSzb output, int prefilterCap) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < output.cols && y < output.rows) { int conv = (int)tex2D(texForSobel, x - 1, y - 1) * (-1) + (int)tex2D(texForSobel, x + 1, y - 1) * (1) + (int)tex2D(texForSobel, x - 1, y ) * (-2) + (int)tex2D(texForSobel, x + 1, y ) * (2) + (int)tex2D(texForSobel, x - 1, y + 1) * (-1) + (int)tex2D(texForSobel, x + 1, y + 1) * (1); conv = ::min(::min(::max(-prefilterCap, conv), prefilterCap) + prefilterCap, 255); output.ptr(y)[x] = conv & 0xFF; } } void prefilter_xsobel(const PtrStepSzb& input, const PtrStepSzb& output, int prefilterCap, cudaStream_t & stream) { cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); cudaSafeCall( cudaBindTexture2D( 0, texForSobel, input.data, desc, input.cols, input.rows, input.step ) ); dim3 threads(16, 16, 1); dim3 grid(1, 1, 1); grid.x = divUp(input.cols, threads.x); grid.y = divUp(input.rows, threads.y); prefilter_kernel<<<grid, threads, 0, stream>>>(output, prefilterCap); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); cudaSafeCall( cudaUnbindTexture (texForSobel ) ); } ////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////// Textureness filtering //////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// texture<unsigned char, 2, cudaReadModeNormalizedFloat> texForTF; __device__ __forceinline__ float sobel(int x, int y) { float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) + tex2D(texForTF, x - 1, y ) * (-2) + tex2D(texForTF, x + 1, y ) * (2) + tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1); return fabs(conv); } __device__ float CalcSums(float *cols, float *cols_cache, int winsz) { float cache = 0; float cache2 = 0; int winsz2 = winsz/2; for(int i = 1; i <= winsz2; i++) cache += 
cols[i]; cols_cache[0] = cache; __syncthreads(); if (threadIdx.x < blockDim.x - winsz2) cache2 = cols_cache[winsz2]; else for(int i = winsz2 + 1; i < winsz; i++) cache2 += cols[i]; return cols[0] + cache + cache2; } #define RpT (2 * ROWSperTHREAD) // got experimentally __global__ void textureness_kernel(PtrStepSzb disp, int winsz, float threshold) { int winsz2 = winsz/2; int n_dirty_pixels = (winsz2) * 2; extern __shared__ float cols_cache[]; float *cols = cols_cache + blockDim.x + threadIdx.x; float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0; int x = blockIdx.x * blockDim.x + threadIdx.x; int beg_row = blockIdx.y * RpT; int end_row = ::min(beg_row + RpT, disp.rows); if (x < disp.cols) { int y = beg_row; float sum = 0; float sum_extra = 0; for(int i = y - winsz2; i <= y + winsz2; ++i) { sum += sobel(x - winsz2, i); if (cols_extra) sum_extra += sobel(x + blockDim.x - winsz2, i); } *cols = sum; if (cols_extra) *cols_extra = sum_extra; __syncthreads(); float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255; if (sum_win < threshold) disp.data[y * disp.step + x] = 0; __syncthreads(); for(int y = beg_row + 1; y < end_row; ++y) { sum = sum - sobel(x - winsz2, y - winsz2 - 1) + sobel(x - winsz2, y + winsz2); *cols = sum; if (cols_extra) { sum_extra = sum_extra - sobel(x + blockDim.x - winsz2, y - winsz2 - 1) + sobel(x + blockDim.x - winsz2, y + winsz2); *cols_extra = sum_extra; } __syncthreads(); float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255; if (sum_win < threshold) disp.data[y * disp.step + x] = 0; __syncthreads(); } } } void postfilter_textureness(const PtrStepSzb& input, int winsz, float avgTexturenessThreshold, const PtrStepSzb& disp, cudaStream_t & stream) { avgTexturenessThreshold *= winsz * winsz; texForTF.filterMode = cudaFilterModeLinear; texForTF.addressMode[0] = cudaAddressModeWrap; texForTF.addressMode[1] = cudaAddressModeWrap; cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); cudaSafeCall( cudaBindTexture2D( 0, texForTF, input.data, desc, input.cols, input.rows, input.step ) ); dim3 threads(128, 1, 1); dim3 grid(1, 1, 1); grid.x = divUp(input.cols, threads.x); grid.y = divUp(input.rows, RpT); size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float); textureness_kernel<<<grid, threads, smem_size, stream>>>(disp, winsz, avgTexturenessThreshold); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); cudaSafeCall( cudaUnbindTexture (texForTF) ); } } // namespace stereobm }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
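The sliding-window trick above is worth isolating: InitColSSD pays the full O(window-height) cost once per column, and StepDown then moves the window down one row for a single add and subtract per disparity. A minimal self-contained sketch of that rolling column-SSD update; the kernel and parameter names (columnSSDSketch, WIN) are illustrative and not part of the OpenCV sources:

__global__ void columnSSDSketch(const unsigned char* L, const unsigned char* R,
                                unsigned int* colSSD, int pitch, int cols, int rows, int WIN)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= cols) return;
    unsigned int sum = 0;
    for (int y = 0; y < WIN; ++y) {                  // InitColSSD's job: sum the full window once
        int d = L[y * pitch + x] - R[y * pitch + x];
        sum += d * d;
    }
    for (int y = WIN; y < rows; ++y) {               // StepDown's job: O(1) slide per row
        int dNew = L[y * pitch + x] - R[y * pitch + x];                  // row entering the window
        int dOld = L[(y - WIN) * pitch + x] - R[(y - WIN) * pitch + x];  // row leaving the window
        sum += dNew * dNew - dOld * dOld;
    }
    colSSD[x] = sum;   // the real kernel consumes each row's running sum via MinSSD instead
}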
a0eec0637cc62eea8eba3a8f8131665a0da73ea8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // For use of the printf function #include <sys/time.h> // For use of gettimeofday function #define NUM_ITERATIONS 10000 #define ABS(a) ((a) < 0 ? -(a) : (a)) #define DT 1 int NUM_PARTICLES; // # of particles to simulate, equivalent to # of threads int BLOCK_SIZE; // Threads PER block // Gravity field float3 field = (float3) {0.f, 0.f, 9.8f}; // Structure for the particles typedef struct { float3 position; float3 velocity; } Particle; /** * Can use multiple qualifiers to specify where a function will run in order * to reuse code that needs to be run on both host and device. * Change the position of the given particle based on its velocity using the * formula `new_position.coord = old_position.coord + velocity.coord` where * coord is x, y and z. * * @param particle Particle for which a position update will be performed */ __host__ __device__ void updatePosition(Particle *particle) { particle->position.x = particle->position.x + particle->velocity.x * DT; particle->position.y = particle->position.y + particle->velocity.y * DT; particle->position.z = particle->position.z + particle->velocity.z * DT; } /** * Update the velocity of the given particle according to a field that specifies * the rate of change for each dimension of the particle's velocity * * @param particle Particle for which a velocity update will be performed * @param field Rate of change for each dimension (x, y, z) of a velocity */ __host__ __device__ void updateVelocity(Particle *particle, float3 field) { particle->velocity.x = particle->velocity.x + field.x * DT; particle->velocity.y = particle->velocity.y + field.y * DT; particle->velocity.z = particle->velocity.z + field.z * DT; } /** * Device implementation for the simulation of moving particles * * @param particles List of particles for which to simulate movement * @param field Values specifying the rate of change for a * particle's velocity in each dimension * @param num_particles # of particles, used to determine how many threads * to give work if too many threads are initiated * @param num_iterations # of timesteps a thread should simulate a particle */ __global__ void simulateParticlesKernel(Particle *particles, float3 field, int num_particles, int num_iterations) { // Unique ID of the current thread to determine what work to compute int threadId = blockIdx.x * blockDim.x + threadIdx.x; // This thread has no work to do, exit; '>=' because index num_particles is already out of bounds if (threadId >= num_particles) return; // Get the right particle Particle *particle = particles + threadId; for (int i = 0; i < num_iterations; ++i) { // Update velocity first updateVelocity(particle, field); // Update position updatePosition(particle); } } /** * Host implementation for the simulation of moving particles * * @param particles List of particles for which to simulate movement * @param num_particles # of particles to simulate * @param num_iterations # of timesteps for which to simulate each particle */ __host__ void simulateParticlesHost(Particle *particles, int num_particles, int num_iterations) { for (Particle *particle = particles; particle < particles + num_particles; particle++) { for (int i = 0; i < num_iterations; ++i) { // Update velocity first updateVelocity(particle, field); // Update position updatePosition(particle); } } } /** * Fill the given array with n random floats. * * @param array Array to populate with floats. * @param n Number of floats to populate the array with.
*/ void populateParticleArray(Particle *particles, int n) { Particle particle; for (int index = 0; index < n; index++) { // Generate random particles particle.position.x = 10.0 * ((float) rand() / (float) RAND_MAX); particle.position.y = 10.0 * ((float) rand() / (float) RAND_MAX); particle.position.z = 10.0 * ((float) rand() / (float) RAND_MAX); particle.velocity.x = 1.0 * ((float) rand() / (float) RAND_MAX); particle.velocity.y = 1.0 * ((float) rand() / (float) RAND_MAX); particle.velocity.z = 1.0 * ((float) rand() / (float) RAND_MAX); particles[index] = particle; } } /** * Compare the simulation results of the device and host implementations. * * @param deviceOut Outcome of simulation from the device implementation * @param hostOut Outcome of simulation from the host implementation * @param n The size of deviceOut and hostOut */ void compareSimulationResults(Particle *deviceOut, Particle *hostOut, int n) { bool resultsAreEqual = true; printf("Comparing the output for each implementation... "); for (int index = 0; index < n; index++) { float cumDiff = 0; cumDiff += ABS(deviceOut[index].position.x - hostOut[index].position.x); cumDiff += ABS(deviceOut[index].position.y - hostOut[index].position.y); cumDiff += ABS(deviceOut[index].position.z - hostOut[index].position.z); cumDiff += ABS(deviceOut[index].velocity.x - hostOut[index].velocity.x); cumDiff += ABS(deviceOut[index].velocity.y - hostOut[index].velocity.y); cumDiff += ABS(deviceOut[index].velocity.z - hostOut[index].velocity.z); // Difference is larger than rounding-error tolerance of .001, means // the outcomes are too different if (cumDiff > .001 || cumDiff < -.001) { resultsAreEqual = false; break; } } // The outcomes of simulation for the device and host implementations are equal if (resultsAreEqual) { printf("Correct!\n"); } else { printf("INCORRECT!!!\n"); } } /** * Return a timestamp with double precision. */ double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } // Entry point into the program, run each implementation of simulation and compare // the results int main(int argc, char **argv) { char *file_path = 0; // stays null unless an output file argument is given FILE *out_file = 0; if (argc != 3 && argc != 4) { printf("Usage: %s <num_particles> <block_size> [output_file]\n", argv[0]); exit(-1); } else { NUM_PARTICLES = atoi(argv[1]); BLOCK_SIZE = atoi(argv[2]); if (argc == 4) file_path = argv[3]; } if (file_path) out_file = fopen(file_path, "a"); // Allocate memory on the host Particle *hostParitcles = (Particle *) malloc(NUM_PARTICLES * sizeof(Particle)); // Allocate memory on the device Particle *devParticles; hipMalloc(&devParticles, NUM_PARTICLES * sizeof(Particle)); // Fill hostParitcles arrays with random floats populateParticleArray(hostParitcles, NUM_PARTICLES); // Copy hostParitcles onto the GPU hipMemcpy(devParticles, hostParitcles, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice); double startTime = cpuSecond(); if (NUM_PARTICLES < 100001) { printf("Simulating particles on the CPU...\n"); simulateParticlesHost(hostParitcles, NUM_PARTICLES, NUM_ITERATIONS); if (out_file) fprintf(out_file, "cpu,%d,%d,%d,%f\n", NUM_PARTICLES, BLOCK_SIZE, NUM_ITERATIONS, cpuSecond() - startTime); printf("%f seconds\n", cpuSecond() - startTime); } printf("Simulating particles on the GPU... 
"); startTime = cpuSecond(); // Round-up to the nearest multiple of BLOCK_SIZE that can hold at least NUM_PARTICLES // threads hipLaunchKernelGGL(( simulateParticlesKernel) , dim3((NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, devParticles, field, NUM_PARTICLES, NUM_ITERATIONS); // Wait until all the threads on the GPU have finished before continuing!!! hipDeviceSynchronize(); if (out_file) fprintf(out_file, "gpu,%d,%d,%d,%f\n", NUM_PARTICLES, BLOCK_SIZE, NUM_ITERATIONS, cpuSecond() - startTime); printf("%f seconds\n", cpuSecond() - startTime); // Copy the result of the simulation on the device back to // the host into hostParitcles Particle *particlesFromGPU = (Particle *) malloc(NUM_PARTICLES * sizeof(Particle)); hipMemcpy(particlesFromGPU, devParticles, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost); // Compare the results of simulation on device and host compareSimulationResults(hostParitcles, particlesFromGPU, NUM_PARTICLES); // Free the allocated memory!!! free(hostParitcles); free(particlesFromGPU); hipFree(devParticles); if (file_path) fclose(out_file); return 0; }
a0eec0637cc62eea8eba3a8f8131665a0da73ea8.cu
#include <stdio.h> // For use of the printf function #include <sys/time.h> // For use of gettimeofday function #define NUM_ITERATIONS 10000 #define ABS(a) ((a) < 0 ? -(a) : (a)) #define DT 1 int NUM_PARTICLES; // # of particles to simulate, equivalent to # of threads int BLOCK_SIZE; // Threads PER block // Gravity field float3 field = (float3) {0.f, 0.f, 9.8f}; // Structure for the particles typedef struct { float3 position; float3 velocity; } Particle; /** * Can use multiple qualifiers to specify where a function will run in order * to reuse code that needs to be run on both host and device. * Change the position of the given particle based on its velocity using the * formula `new_position.coord = old_position.coord + velocity.coord` where * coord is x, y and z. * * @param particle Particle for which a position update will be performed */ __host__ __device__ void updatePosition(Particle *particle) { particle->position.x = particle->position.x + particle->velocity.x * DT; particle->position.y = particle->position.y + particle->velocity.y * DT; particle->position.z = particle->position.z + particle->velocity.z * DT; } /** * Update the velocity of the given particle according to a field that specifies * the rate of change for each dimension of the particle's velocity * * @param particle Particle for which a velocity update will be performed * @param field Rate of change for each dimension (x, y, z) of a velocity */ __host__ __device__ void updateVelocity(Particle *particle, float3 field) { particle->velocity.x = particle->velocity.x + field.x * DT; particle->velocity.y = particle->velocity.y + field.y * DT; particle->velocity.z = particle->velocity.z + field.z * DT; } /** * Device implementation for the simulation of moving particles * * @param particles List of particles for which to simulate movement * @param field Values specifying the rate of change for a * particle's velocity in each dimension * @param num_particles # of particles, used to determine how many threads * to give work if too many threads are initiated * @param num_iterations # of timesteps a thread should simulate a particle */ __global__ void simulateParticlesKernel(Particle *particles, float3 field, int num_particles, int num_iterations) { // Unique ID of the current thread to determine what work to compute int threadId = blockIdx.x * blockDim.x + threadIdx.x; // This thread has no work to do, exit; '>=' because index num_particles is already out of bounds if (threadId >= num_particles) return; // Get the right particle Particle *particle = particles + threadId; for (int i = 0; i < num_iterations; ++i) { // Update velocity first updateVelocity(particle, field); // Update position updatePosition(particle); } } /** * Host implementation for the simulation of moving particles * * @param particles List of particles for which to simulate movement * @param num_particles # of particles to simulate * @param num_iterations # of timesteps for which to simulate each particle */ __host__ void simulateParticlesHost(Particle *particles, int num_particles, int num_iterations) { for (Particle *particle = particles; particle < particles + num_particles; particle++) { for (int i = 0; i < num_iterations; ++i) { // Update velocity first updateVelocity(particle, field); // Update position updatePosition(particle); } } } /** * Fill the given array with n random floats. * * @param array Array to populate with floats. * @param n Number of floats to populate the array with.
*/ void populateParticleArray(Particle *particles, int n) { Particle particle; for (int index = 0; index < n; index++) { // Generate random particles particle.position.x = 10.0 * ((float) rand() / (float) RAND_MAX); particle.position.y = 10.0 * ((float) rand() / (float) RAND_MAX); particle.position.z = 10.0 * ((float) rand() / (float) RAND_MAX); particle.velocity.x = 1.0 * ((float) rand() / (float) RAND_MAX); particle.velocity.y = 1.0 * ((float) rand() / (float) RAND_MAX); particle.velocity.z = 1.0 * ((float) rand() / (float) RAND_MAX); particles[index] = particle; } } /** * Compare the simulation results of the device and host implementations. * * @param deviceOut Outcome of simulation from the device implementation * @param hostOut Outcome of simulation from the host implementation * @param n The size of deviceOut and hostOut */ void compareSimulationResults(Particle *deviceOut, Particle *hostOut, int n) { bool resultsAreEqual = true; printf("Comparing the output for each implementation... "); for (int index = 0; index < n; index++) { float cumDiff = 0; cumDiff += ABS(deviceOut[index].position.x - hostOut[index].position.x); cumDiff += ABS(deviceOut[index].position.y - hostOut[index].position.y); cumDiff += ABS(deviceOut[index].position.z - hostOut[index].position.z); cumDiff += ABS(deviceOut[index].velocity.x - hostOut[index].velocity.x); cumDiff += ABS(deviceOut[index].velocity.y - hostOut[index].velocity.y); cumDiff += ABS(deviceOut[index].velocity.z - hostOut[index].velocity.z); // Difference is larger than rounding-error tolerance of .001, means // the outcomes are too different if (cumDiff > .001 || cumDiff < -.001) { resultsAreEqual = false; break; } } // The outcomes of simulation for the device and host implementations are equal if (resultsAreEqual) { printf("Correct!\n"); } else { printf("INCORRECT!!!\n"); } } /** * Return a timestamp with double precision. */ double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } // Entry point into the program, run each implementation of simulation and compare // the results int main(int argc, char **argv) { char *file_path = 0; // stays null unless an output file argument is given FILE *out_file = 0; if (argc != 3 && argc != 4) { printf("Usage: %s <num_particles> <block_size> [output_file]\n", argv[0]); exit(-1); } else { NUM_PARTICLES = atoi(argv[1]); BLOCK_SIZE = atoi(argv[2]); if (argc == 4) file_path = argv[3]; } if (file_path) out_file = fopen(file_path, "a"); // Allocate memory on the host Particle *hostParitcles = (Particle *) malloc(NUM_PARTICLES * sizeof(Particle)); // Allocate memory on the device Particle *devParticles; cudaMalloc(&devParticles, NUM_PARTICLES * sizeof(Particle)); // Fill hostParitcles arrays with random floats populateParticleArray(hostParitcles, NUM_PARTICLES); // Copy hostParitcles onto the GPU cudaMemcpy(devParticles, hostParitcles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice); double startTime = cpuSecond(); if (NUM_PARTICLES < 100001) { printf("Simulating particles on the CPU...\n"); simulateParticlesHost(hostParitcles, NUM_PARTICLES, NUM_ITERATIONS); if (out_file) fprintf(out_file, "cpu,%d,%d,%d,%f\n", NUM_PARTICLES, BLOCK_SIZE, NUM_ITERATIONS, cpuSecond() - startTime); printf("%f seconds\n", cpuSecond() - startTime); } printf("Simulating particles on the GPU... 
"); startTime = cpuSecond(); // Round-up to the nearest multiple of BLOCK_SIZE that can hold at least NUM_PARTICLES // threads simulateParticlesKernel <<<(NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>> (devParticles, field, NUM_PARTICLES, NUM_ITERATIONS); // Wait until all the threads on the GPU have finished before continuing!!! cudaDeviceSynchronize(); if (out_file) fprintf(out_file, "gpu,%d,%d,%d,%f\n", NUM_PARTICLES, BLOCK_SIZE, NUM_ITERATIONS, cpuSecond() - startTime); printf("%f seconds\n", cpuSecond() - startTime); // Copy the result of the simulation on the device back to // the host into hostParitcles Particle *particlesFromGPU = (Particle *) malloc(NUM_PARTICLES * sizeof(Particle)); cudaMemcpy(particlesFromGPU, devParticles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost); // Compare the results of simulation on device and host compareSimulationResults(hostParitcles, particlesFromGPU, NUM_PARTICLES); // Free the allocated memory!!! free(hostParitcles); free(particlesFromGPU); cudaFree(devParticles); if (file_path) fclose(out_file); return 0; }
f6cc0c17a26c67066a8b0c1b2f9dc75b22c8253e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GPUDataAccessor.cu * * Created on: 29 gru 2019 * Author: pkua */ #include "GPUDataAccessor.h" __global__ void gpu_copy_to_global_memory(const void *object, void *globalDest, size_t size) { if (!CUDA_IS_IT_FIRST_THREAD) return; memcpy(globalDest, object, size); }
f6cc0c17a26c67066a8b0c1b2f9dc75b22c8253e.cu
/* * GPUDataAccessor.cu * * Created on: 29 gru 2019 * Author: pkua */ #include "GPUDataAccessor.h" __global__ void gpu_copy_to_global_memory(const void *object, void *globalDest, size_t size) { if (!CUDA_IS_IT_FIRST_THREAD) return; memcpy(globalDest, object, size); }
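A usage sketch for the copier above. Assumptions: CUDA_IS_IT_FIRST_THREAD (defined in GPUDataAccessor.h, not shown here) selects exactly one thread, and both pointers refer to device memory; Params and copyParamsOnDevice are illustrative names, not part of the project:

struct Params { int n; float dt; };   // hypothetical device-resident object

void copyParamsOnDevice(const Params* d_src, Params* d_dst)
{
    // One thread suffices: every other thread early-outs, and the surviving
    // thread performs a device-side memcpy between the two global buffers.
    gpu_copy_to_global_memory<<<1, 1>>>(d_src, d_dst, sizeof(Params));
    cudaDeviceSynchronize();
}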
8c5f379e4d6b188f3fa24a785ce44495edb9f514.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/device_functions.h> __device__ int *boyo; __global__ void gimme() { printf("%d \n", boyo[threadIdx.x]); } int main(int argc, char *argv[]) { int mb[32]; // A __device__ symbol cannot be handed to hipMalloc from host code (that only writes the host-side shadow, not the device symbol); allocate through a host pointer and publish it with hipMemcpyToSymbol int *d_mb; hipMalloc((void **) &d_mb, 32 * sizeof(int)); for (int i=0; i<32; i++) { mb[i] = i*i; } hipMemcpy(d_mb, mb, 32 * sizeof(int), hipMemcpyHostToDevice); hipMemcpyToSymbol(HIP_SYMBOL(boyo), &d_mb, sizeof(d_mb)); hipLaunchKernelGGL(( gimme) , dim3(1), dim3(32), 0, 0, ); hipDeviceSynchronize(); // flush device-side printf output before freeing hipFree(d_mb); return 0; }
8c5f379e4d6b188f3fa24a785ce44495edb9f514.cu
#include <cuda.h> #include <stdio.h> #include <cuda_runtime.h> #include <device_functions.h> __device__ int *boyo; __global__ void gimme() { printf("%d \n", boyo[threadIdx.x]); } int main(int argc, char *argv[]) { int mb[32]; // A __device__ symbol cannot be handed to cudaMalloc from host code (that only writes the host-side shadow, not the device symbol); allocate through a host pointer and publish it with cudaMemcpyToSymbol int *d_mb; cudaMalloc((void **) &d_mb, 32 * sizeof(int)); for (int i=0; i<32; i++) { mb[i] = i*i; } cudaMemcpy(d_mb, mb, 32 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(boyo, &d_mb, sizeof(d_mb)); gimme <<<1, 32>>> (); cudaDeviceSynchronize(); // flush device-side printf output before freeing cudaFree(d_mb); return 0; }
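An equivalent way to publish a device buffer through the __device__ pointer symbol above, sketched with cudaGetSymbolAddress instead of cudaMemcpyToSymbol; host_values, count, and the function name are illustrative:

void initBoyoViaSymbolAddress(const int* host_values, int count)
{
    int* d_buf = 0;
    cudaMalloc((void**)&d_buf, count * sizeof(int));
    cudaMemcpy(d_buf, host_values, count * sizeof(int), cudaMemcpyHostToDevice);

    void* symAddr = 0;
    cudaGetSymbolAddress(&symAddr, boyo);   // device-side address of the symbol itself
    cudaMemcpy(symAddr, &d_buf, sizeof(d_buf), cudaMemcpyHostToDevice);
}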
134232903f1f7cc47775ffa7bb8f73f7e55fe286.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <hip/hip_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=19){ #pragma unroll 100 for(unsigned k=0; k<iterations;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, 
stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { srand((unsigned)time(0)); // seed once per fill, not once per element for (int i = 0; i < n; ++i){ data[i] = rand(); // keep rand()'s full range; integer division by RAND_MAX collapses every entry to 0 or 1 } }
134232903f1f7cc47775ffa7bb8f73f7e55fe286.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <cuda_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=19){ #pragma unroll 100 for(unsigned k=0; k<iterations;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); 
getLastCudaError("kernel launch failure"); cudaDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { srand((unsigned)time(0)); // seed once per fill, not once per element for (int i = 0; i < n; ++i){ data[i] = rand(); // keep rand()'s full range; integer division by RAND_MAX collapses every entry to 0 or 1 } }
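The `(i%32)<=19` test in PowerKernal2 above is a lane mask: i % 32 is the thread's lane within its warp, so the branch keeps lanes 0..19 busy and idles lanes 20..31 in every warp, rather than switching whole warps off. A minimal illustration of the pattern (laneMaskedWork is illustrative, not from the file):

__global__ void laneMaskedWork(unsigned int* out, int n, int iterations)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    unsigned int acc = i;
    if ((i % 32) <= 19) {                    // 20 of 32 lanes active per warp
        for (int k = 0; k < iterations; ++k)
            acc += acc ^ (unsigned int)k;    // cheap integer work to load the ALUs
    }
    out[i] = acc;                            // idle lanes just write their id back
}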
d692930c816f36da3ed68a3676a82faf6c1b9738.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. 
//**************************************************************************** #include "utils.h" #include <stdio.h> __device__ int clamp(int a, int b) { return min(max(a, 0), b - 1); } __device__ void gaussian_blurNoShared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; const int halfFilterWidth = filterWidth >> 1; if(index_x < numCols && index_y < numRows) { const int index = index_y * numCols + index_x; float sum = 0; for(int y = 0; y < filterWidth; ++y) { for(int x = 0; x < filterWidth; ++x) { int color_x = clamp(index_x + x - halfFilterWidth, numCols); int color_y = clamp(index_y + y - halfFilterWidth, numRows); float filterValue = filter[y * filterWidth + x]; sum += filterValue * inputChannel[color_y * numCols + color_x]; } } outputChannel[index] = sum; } } __device__ int isBottomSide(const int filterWidth) { return threadIdx.y >= (blockDim.y - filterWidth + 1); } __device__ int isRightSide(const int filterWidth) { return threadIdx.x >= (blockDim.x - filterWidth + 1); } __device__ int isBottomRightCorner(const int filterWidth) { return threadIdx.x < (filterWidth - 1) && threadIdx.y < (filterWidth - 1); } __global__ void gaussian_blurWithShared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { extern __shared__ unsigned char s_array[]; const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; const int index = index_y * numCols + index_x; const int halfFilterWidth = filterWidth >> 1; int positionToLoadFromX = clamp(index_x - halfFilterWidth, numCols); int positionToLoadFromY = clamp(index_y - halfFilterWidth, numRows); int blurredBlockDimension = (blockDim.x + filterWidth - 1); s_array[threadIdx.y* blurredBlockDimension + threadIdx.x] = inputChannel[positionToLoadFromY*numCols + positionToLoadFromX]; int positionToLoadFromX_original = positionToLoadFromX; int positionToLoadFromY_original = positionToLoadFromY; if (isBottomSide(filterWidth)) { positionToLoadFromY = clamp(index_y + halfFilterWidth, numRows); s_array[(threadIdx.y + filterWidth - 1)*blurredBlockDimension + threadIdx.x] = inputChannel[positionToLoadFromY*numCols + positionToLoadFromX_original]; } if (isRightSide(filterWidth)) { positionToLoadFromX = clamp(index_x + halfFilterWidth, numCols); s_array[threadIdx.y * blurredBlockDimension + threadIdx.x + filterWidth - 1] = inputChannel[positionToLoadFromY_original*numCols + positionToLoadFromX]; } if(isBottomRightCorner(filterWidth)){ positionToLoadFromX = clamp(index_x - halfFilterWidth + blockDim.x, numCols); positionToLoadFromY = clamp(index_y - halfFilterWidth + blockDim.y, numRows); s_array[(threadIdx.y + blockDim.y)*blurredBlockDimension + threadIdx.x + blockDim.x] = inputChannel[positionToLoadFromY*numCols + positionToLoadFromX]; } __syncthreads(); if(index_x < numCols && index_y < numRows) { float sum = 0; for(int y = 0; y < filterWidth; ++y) { for(int x = 0; x < filterWidth; ++x) { int color_x = threadIdx.x + x; int color_y = threadIdx.y + y; float filterValue = filter[y * filterWidth + x]; float channelValue = (float)s_array[color_y * blurredBlockDimension + color_x]; sum += filterValue * channelValue; } } outputChannel[index] = sum; } } //This kernel takes in an image represented as 
a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; if(index_x < numCols && index_y < numRows) { const int index = index_y * numCols + index_x; redChannel[index] = inputImageRGBA[index].x; greenChannel[index] = inputImageRGBA[index].y; blueChannel[index] = inputImageRGBA[index].z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; if(index_x < numCols && index_y < numRows) { const int index = index_y * numCols + index_x; unsigned char red = redChannel[index]; unsigned char green = greenChannel[index]; unsigned char blue = blueChannel[index]; uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[index] = outputPixel; } } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { size_t channelSize = sizeof(unsigned char) * numRowsImage * numColsImage; checkCudaErrors(hipMalloc(&d_red, channelSize)); checkCudaErrors(hipMalloc(&d_green, channelSize)); checkCudaErrors(hipMalloc(&d_blue, channelSize)); size_t filterSize = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(hipMalloc(&d_filter, filterSize)); checkCudaErrors(hipMemcpy(d_filter, h_filter, filterSize, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const int BLOCK_WIDTH = 32; const dim3 blockSize(BLOCK_WIDTH, BLOCK_WIDTH, 1); const dim3 gridSize(numCols / BLOCK_WIDTH + 1, numRows / BLOCK_WIDTH + 1, 1); hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); int shared_size = (blockSize.x + filterWidth - 1)*(blockSize.y + filterWidth - 1) * sizeof(unsigned char); // gaussian_blurNoShared<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blurNoShared<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blurNoShared<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blurWithShared), dim3(gridSize), dim3(blockSize), shared_size, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blurWithShared), dim3(gridSize), dim3(blockSize), shared_size, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blurWithShared), dim3(gridSize), 
dim3(blockSize), shared_size, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter)); }
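Both blur kernels rely on the clamp helper above for clamp-to-edge addressing: out-of-range sample coordinates are pinned to the nearest valid index, so filter taps falling outside the image replicate the border pixels instead of reading out of bounds. A worked example (clampIndex is an illustrative copy of the file's clamp):

__device__ int clampIndex(int a, int n) { return min(max(a, 0), n - 1); }
// For a 5-pixel row: clampIndex(-2, 5) == 0 and clampIndex(6, 5) == 4,
// i.e. every out-of-range tap re-reads the nearest edge pixel.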
d692930c816f36da3ed68a3676a82faf6c1b9738.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. 
//**************************************************************************** #include "utils.h" #include <stdio.h> __device__ int clamp(int a, int b) { return min(max(a, 0), b - 1); } __device__ void gaussian_blurNoShared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; const int halfFilterWidth = filterWidth >> 1; if(index_x < numCols && index_y < numRows) { const int index = index_y * numCols + index_x; float sum = 0; for(int y = 0; y < filterWidth; ++y) { for(int x = 0; x < filterWidth; ++x) { int color_x = clamp(index_x + x - halfFilterWidth, numCols); int color_y = clamp(index_y + y - halfFilterWidth, numRows); float filterValue = filter[y * filterWidth + x]; sum += filterValue * inputChannel[color_y * numCols + color_x]; } } outputChannel[index] = sum; } } __device__ int isBottomSide(const int filterWidth) { return threadIdx.y >= (blockDim.y - filterWidth + 1); } __device__ int isRightSide(const int filterWidth) { return threadIdx.x >= (blockDim.x - filterWidth + 1); } __device__ int isBottomRightCorner(const int filterWidth) { return threadIdx.x < (filterWidth - 1) && threadIdx.y < (filterWidth - 1); } __global__ void gaussian_blurWithShared(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { extern __shared__ unsigned char s_array[]; const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; const int index = index_y * numCols + index_x; const int halfFilterWidth = filterWidth >> 1; int positionToLoadFromX = clamp(index_x - halfFilterWidth, numCols); int positionToLoadFromY = clamp(index_y - halfFilterWidth, numRows); int blurredBlockDimension = (blockDim.x + filterWidth - 1); s_array[threadIdx.y* blurredBlockDimension + threadIdx.x] = inputChannel[positionToLoadFromY*numCols + positionToLoadFromX]; int positionToLoadFromX_original = positionToLoadFromX; int positionToLoadFromY_original = positionToLoadFromY; if (isBottomSide(filterWidth)) { positionToLoadFromY = clamp(index_y + halfFilterWidth, numRows); s_array[(threadIdx.y + filterWidth - 1)*blurredBlockDimension + threadIdx.x] = inputChannel[positionToLoadFromY*numCols + positionToLoadFromX_original]; } if (isRightSide(filterWidth)) { positionToLoadFromX = clamp(index_x + halfFilterWidth, numCols); s_array[threadIdx.y * blurredBlockDimension + threadIdx.x + filterWidth - 1] = inputChannel[positionToLoadFromY_original*numCols + positionToLoadFromX]; } if(isBottomRightCorner(filterWidth)){ positionToLoadFromX = clamp(index_x - halfFilterWidth + blockDim.x, numCols); positionToLoadFromY = clamp(index_y - halfFilterWidth + blockDim.y, numRows); s_array[(threadIdx.y + blockDim.y)*blurredBlockDimension + threadIdx.x + blockDim.x] = inputChannel[positionToLoadFromY*numCols + positionToLoadFromX]; } __syncthreads(); if(index_x < numCols && index_y < numRows) { float sum = 0; for(int y = 0; y < filterWidth; ++y) { for(int x = 0; x < filterWidth; ++x) { int color_x = threadIdx.x + x; int color_y = threadIdx.y + y; float filterValue = filter[y * filterWidth + x]; float channelValue = (float)s_array[color_y * blurredBlockDimension + color_x]; sum += filterValue * channelValue; } } outputChannel[index] = sum; } } //This kernel takes in an image represented as 
a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; if(index_x < numCols && index_y < numRows) { const int index = index_y * numCols + index_x; redChannel[index] = inputImageRGBA[index].x; greenChannel[index] = inputImageRGBA[index].y; blueChannel[index] = inputImageRGBA[index].z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int index_x = blockIdx.x * blockDim.x + threadIdx.x; const int index_y = blockIdx.y * blockDim.y + threadIdx.y; if(index_x < numCols && index_y < numRows) { const int index = index_y * numCols + index_x; unsigned char red = redChannel[index]; unsigned char green = greenChannel[index]; unsigned char blue = blueChannel[index]; uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[index] = outputPixel; } } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { size_t channelSize = sizeof(unsigned char) * numRowsImage * numColsImage; checkCudaErrors(cudaMalloc(&d_red, channelSize)); checkCudaErrors(cudaMalloc(&d_green, channelSize)); checkCudaErrors(cudaMalloc(&d_blue, channelSize)); size_t filterSize = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(cudaMalloc(&d_filter, filterSize)); checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterSize, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { const int BLOCK_WIDTH = 32; const dim3 blockSize(BLOCK_WIDTH, BLOCK_WIDTH, 1); const dim3 gridSize(numCols / BLOCK_WIDTH + 1, numRows / BLOCK_WIDTH + 1, 1); separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); int shared_size = (blockSize.x + filterWidth - 1)*(blockSize.y + filterWidth - 1) * sizeof(unsigned char); // gaussian_blurNoShared<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blurNoShared<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); // gaussian_blurNoShared<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blurWithShared<<<gridSize, blockSize, shared_size>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blurWithShared<<<gridSize, blockSize, shared_size>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blurWithShared<<<gridSize, blockSize, shared_size>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); cudaDeviceSynchronize(); 
checkCudaErrors(cudaGetLastError()); recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
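The dynamic shared-memory size computed for gaussian_blurWithShared above reduces to a one-liner: the tile must cover the block plus a (filterWidth - 1) apron, so for BLOCK_WIDTH = 32 and filterWidth = 9 it is (32 + 8) * (32 + 8) * 1 byte = 1600 bytes per block. A sketch (blurSharedBytes is an illustrative name):

static size_t blurSharedBytes(int blockWidth, int filterWidth)
{
    int tile = blockWidth + filterWidth - 1;        // block width plus the filter apron
    return (size_t)tile * tile * sizeof(unsigned char);
}
// usage sketch: gaussian_blurWithShared<<<grid, block, blurSharedBytes(32, filterWidth)>>>(...);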
af4071012df0513d395be3ea454008ec7137cd22.hip
// !!! This is a file automatically generated by hipify!!! #include <time.h> #include <stdio.h> #include "ttime.h" static hipEvent_t start, stop; static hipEvent_t start2, stop2; static struct timespec cpu_start, cpu_stop; static bool second = false; void start_time_cuda() { hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start2); hipEventCreate(&stop2); if (second) hipEventRecord( start2, 0 ); else hipEventRecord( start, 0 ); } void start_time_cpu() { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start); } void stop_time_cuda() { float elapsedTime; if (second) { hipEventRecord(stop2, 0); hipEventSynchronize(stop2); hipEventElapsedTime( &elapsedTime, start2, stop2); } else { hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime( &elapsedTime, start, stop); } printf("Total GPU execution time: %3.1f ms\n", elapsedTime); second = true; } void stop_time_cpu() { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop); double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6; printf( "CPU execution time: %3.1f ms\n", result); }
af4071012df0513d395be3ea454008ec7137cd22.cu
#include <time.h> #include <stdio.h> #include "ttime.h" static cudaEvent_t start, stop; static cudaEvent_t start2, stop2; static struct timespec cpu_start, cpu_stop; static bool second = false; void start_time_cuda() { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start2); cudaEventCreate(&stop2); if (second) cudaEventRecord( start2, 0 ); else cudaEventRecord( start, 0 ); } void start_time_cpu() { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start); } void stop_time_cuda() { float elapsedTime; if (second) { cudaEventRecord(stop2, 0); cudaEventSynchronize(stop2); cudaEventElapsedTime( &elapsedTime, start2, stop2); } else { cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime( &elapsedTime, start, stop); } printf("Total GPU execution time: %3.1f ms\n", elapsedTime); second = true; } void stop_time_cpu() { clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop); double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6; printf( "CPU execution time: %3.1f ms\n", result); }
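A usage sketch for the timing helpers above; someKernel and its launch configuration are placeholders. stop_time_cuda() synchronizes on the stop event before reading it, so the printed figure covers the whole launch:

void timedRun()
{
    start_time_cpu();
    // ... host-side baseline work ...
    stop_time_cpu();                      // prints "CPU execution time: ..."

    start_time_cuda();
    // someKernel<<<grid, block>>>(...);  // placeholder launch
    stop_time_cuda();                     // prints "Total GPU execution time: ..."
}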
a7e7b87b953086f983b35def6c3d33bb8e0bc0d0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define N 512 #define MAX_ERR 1e-6 //__global__ void vector_add(float *out, float *a, float *b, int n) { // int stride = 1; // int tid = blockIdx.x * blockDim.x + threadIdx.x; // 0 * 256 + 1 = 1 | BLOCK0 | // 0 * 256 + 2 = 2 // 1 * 256 + 1 = 257 | BLOCK1 | // 1 * 256 + 2 = 258 // out[tid] = a[tid] + b[tid]; //} void print_results(float *C){ printf("ADDING\n"); printf("["); for(int i = 0 ; i < 3; i++){ printf("%f,",C[i]); } printf("]\n"); } void print_results_sub(float *C){ printf("SUBTRACTING\n"); printf("["); for(int i = 0 ; i < 3; i++){ printf("%f,",C[i]); } printf("]\n"); } __global__ void vector_add(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; CUDA_C[tid] = CUDA_A[tid] + CUDA_B[tid]; } __global__ void vector_sub(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; CUDA_C[tid] = CUDA_A[tid] - CUDA_B[tid]; } __global__ void vector_dot_product(float *CUDA_A, float *CUDA_B, float *CUDA_C,float *CUDA_K, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; CUDA_C[tid] = CUDA_A[tid] * CUDA_B[tid]; // Only one thread should accumulate the final dot product __syncthreads(); if(tid == 0){ *CUDA_K = CUDA_C[tid] + CUDA_C[tid+1] + CUDA_C[tid+2]; } } int main(){ float *C, *K; float *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_K; // Allocate host memory float A[3]= {2.0,4.0,6.0}; printf("A = {2.0,4.0,6.0}\n"); float B[3]= {1.0,2.0,3.0}; printf("B = {1.0,2.0,3.0}\n"); C = (float*)malloc(sizeof(float) * N); K = (float*)malloc(sizeof(float)); // Allocate device memory hipMalloc((void**)&CUDA_A, sizeof(float) * N); hipMalloc((void**)&CUDA_B, sizeof(float) * N); hipMalloc((void**)&CUDA_C, sizeof(float) * N); hipMalloc((void**)&CUDA_K, sizeof(float)); // Transfer data from host to device memory; A and B hold only three floats, so copy exactly sizeof(A)/sizeof(B) bytes hipMemcpy(CUDA_A, A, sizeof(A), hipMemcpyHostToDevice); hipMemcpy(CUDA_B, B, sizeof(B), hipMemcpyHostToDevice); // Executing kernel hipLaunchKernelGGL(( vector_add), dim3(1),dim3(3), 0, 0, CUDA_A, CUDA_B, CUDA_C, N); hipMemcpy(C, CUDA_C, sizeof(float) * 3, hipMemcpyDeviceToHost); //Executing kernel print_results(C); hipLaunchKernelGGL(( vector_sub), dim3(1),dim3(3), 0, 0, CUDA_A, CUDA_B, CUDA_C, N); hipMemcpy(C, CUDA_C, sizeof(float) * 3, hipMemcpyDeviceToHost); print_results_sub(C); hipLaunchKernelGGL(( vector_dot_product), dim3(1),dim3(3), 0, 0, CUDA_A, CUDA_B, CUDA_C, CUDA_K, N); hipMemcpy(C, CUDA_C, sizeof(float) * 3, hipMemcpyDeviceToHost); print_results(C); //hipMemcpy(K, CUDA_K, sizeof(float), hipMemcpyDeviceToHost); //printf("Dot product result %f", *K); // Deallocate device memory hipFree(CUDA_A); hipFree(CUDA_B); hipFree(CUDA_C); hipFree(CUDA_K); // Deallocate host memory //free(A); //free(B); free(C); free(K); }
a7e7b87b953086f983b35def6c3d33bb8e0bc0d0.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 512
#define MAX_ERR 1e-6

//__global__ void vector_add(float *out, float *a, float *b, int n) {
//    int stride = 1;
//    int tid = blockIdx.x * blockDim.x + threadIdx.x;
//    // 0 * 256 + 1 = 1    | BLOCK0 |
//    // 0 * 256 + 2 = 2
//    // 1 * 256 + 1 = 257  | BLOCK1 |
//    // 1 * 256 + 2 = 258
//    out[tid] = a[tid] + b[tid];
//}

void print_results(float *C){
    printf("ADDING\n");
    printf("[");
    for(int i = 0 ; i < 3; i++){
        printf("%f,", C[i]);
    }
    printf("]\n");
}

void print_results_sub(float *C){
    printf("SUBTRACTING\n");
    printf("[");
    for(int i = 0 ; i < 3; i++){
        printf("%f,", C[i]);
    }
    printf("]\n");
}

__global__ void vector_add(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    CUDA_C[tid] = CUDA_A[tid] + CUDA_B[tid];
}

__global__ void vector_sub(float *CUDA_A, float *CUDA_B, float *CUDA_C, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    CUDA_C[tid] = CUDA_A[tid] - CUDA_B[tid];
}

__global__ void vector_dot_product(float *CUDA_A, float *CUDA_B, float *CUDA_C, float *CUDA_K, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    CUDA_C[tid] = CUDA_A[tid] * CUDA_B[tid];
    // Only one thread should finish the dot product
    __syncthreads();
    if(tid == 0){
        *CUDA_K = CUDA_C[tid] + CUDA_C[tid+1] + CUDA_C[tid+2];
    }
}

int main(){
    float *C, *K;
    float *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_K;

    // Allocate host memory
    float A[3] = {2.0, 4.0, 6.0};
    printf("A = {2.0,4.0,6.0}\n");
    float B[3] = {1.0, 2.0, 3.0};
    printf("B = {1.0,2.0,3.0}\n");
    C = (float*)malloc(sizeof(float) * N);
    K = (float*)malloc(sizeof(float));

    // Allocate device memory
    cudaMalloc((void**)&CUDA_A, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_B, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_C, sizeof(float) * N);
    cudaMalloc((void**)&CUDA_K, sizeof(float));

    // Transfer data from host to device memory
    // (A and B hold only 3 floats, so copy only what they contain)
    cudaMemcpy(CUDA_A, A, sizeof(float) * 3, cudaMemcpyHostToDevice);
    cudaMemcpy(CUDA_B, B, sizeof(float) * 3, cudaMemcpyHostToDevice);

    // Executing kernels
    vector_add<<<1,3>>>(CUDA_A, CUDA_B, CUDA_C, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * N, cudaMemcpyDeviceToHost);
    print_results(C);

    vector_sub<<<1,3>>>(CUDA_A, CUDA_B, CUDA_C, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * N, cudaMemcpyDeviceToHost);
    print_results_sub(C);

    vector_dot_product<<<1,3>>>(CUDA_A, CUDA_B, CUDA_C, CUDA_K, N);
    cudaMemcpy(C, CUDA_C, sizeof(float) * N, cudaMemcpyDeviceToHost);
    print_results(C);   // prints the elementwise products a[i]*b[i]

    //cudaMemcpy(K, CUDA_K, sizeof(float), cudaMemcpyDeviceToHost);
    //printf("Dot product result %f", *K);

    // Deallocate device memory
    cudaFree(CUDA_A);
    cudaFree(CUDA_B);
    cudaFree(CUDA_C);
    cudaFree(CUDA_K);

    // Deallocate host memory
    //free(A);
    //free(B);
    free(C);
    free(K);
}
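The vector_dot_product kernel in this pair hard-codes thread 0 summing exactly three products, so it only works for this 3-element, single-block demo. For arbitrary n the usual pattern is a per-block shared-memory tree reduction followed by an atomic accumulation. A minimal sketch under the assumption of a 256-thread block (dot_product_general is illustrative, not part of the original pair):

#include <cuda_runtime.h>

// Each block reduces its 256 products in shared memory, then one thread
// per block adds the partial sum into *out (which must start at 0).
__global__ void dot_product_general(const float *a, const float *b, float *out, int n) {
    __shared__ float cache[256];                    // assumes blockDim.x == 256
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    cache[threadIdx.x] = (tid < n) ? a[tid] * b[tid] : 0.0f;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction: halve the stride each step
        if (threadIdx.x < s)
            cache[threadIdx.x] += cache[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(out, cache[0]);                   // combine per-block partial sums
}

It would be launched as dot_product_general<<<(n + 255) / 256, 256>>>(a, b, out, n) with *out zeroed beforehand, e.g. via cudaMemset.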
e836e92aae70073b7cd532c36b4701e09a45bbbe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
*   CUDAHammingMean.cu
*   CUDAHammingMean
*
*   Author: Kareem Omar
*   kareem.omar@uah.edu
*   https://github.com/komrad36
*
*   Last updated Oct 8, 2016
*******************************************************************/

//
// Fastest GPU implementation of a brute-force
// Hamming-weight matrix for 512-bit binary descriptors.
//
// Yes, that means the DIFFERENCE in popcounts is used
// for thresholding, NOT the ratio. This is the CORRECT
// approach for binary descriptors.
//
// This laboriously crafted kernel is EXTREMELY fast.
// 43 BILLION comparisons per second on a stock GTX1080,
// enough to match nearly 38,000 descriptors per frame at 30 fps (!)
//
// A key insight responsible for much of the performance of
// this insanely fast CUDA kernel is due to
// Christopher Parker (https://github.com/csp256), to whom
// I am extremely grateful.
//
// CUDA CC 3.0 or higher is required.
//
// All functionality is contained in the files CUDAK2NN.h
// and CUDAK2NN.cu. 'main.cpp' is simply a sample test harness
// with example usage and performance testing.
//

#include "CUDAHammingMean.h"

__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDAHammingMean_kernel(const hipTextureObject_t tex_q, const int num_q, const uint64_t* __restrict__ g_training, const int num_t, uint32_t* const __restrict__ g_sums) {
    uint64_t train = *(g_training += threadIdx.x & 7);
    g_training += 8;
    uint64_t q[8];
    for (int i = 0, offset = ((threadIdx.x & 24) << 3) + (threadIdx.x & 7) + (blockIdx.x << 11) + (threadIdx.y << 8); i < 8; ++i, offset += 8) {
        const uint2 buf = tex1Dfetch<uint2>(tex_q, offset);
        asm("mov.b64 %0, {%1,%2};" : "=l"(q[i]) : "r"(buf.x), "r"(buf.y)); // some assembly required
    }
    uint32_t total = 0U;
#pragma unroll 6
    for (int t = 0; t < num_t; ++t, g_training += 8) {
        uint32_t dist[4];
        for (int i = 0; i < 4; ++i) dist[i] = __byte_perm(__popcll(q[i] ^ train), __popcll(q[i + 4] ^ train), 0x5410);
        for (int i = 0; i < 4; ++i) dist[i] += __shfl_xor(dist[i], 1);
        train = *g_training;
        if (threadIdx.x & 1) dist[0] = dist[1];
        if (threadIdx.x & 1) dist[2] = dist[3];
        dist[0] += __shfl_xor(dist[0], 2);
        dist[2] += __shfl_xor(dist[2], 2);
        if (threadIdx.x & 2) dist[0] = dist[2];
        dist[0] += __shfl_xor(dist[0], 4);
        total += __byte_perm(dist[0], 0U, threadIdx.x & 4 ? 0x5432U : 0x5410U);
    }
    const int idx = (blockIdx.x << 8) + (threadIdx.y << 5) + threadIdx.x;
    if (idx < num_q) g_sums[idx] = total;
}

void CUDAHammingMean(const void* const __restrict d_t, const int num_t, const hipTextureObject_t tex_q, const int num_q, uint32_t* const __restrict d_sums) {
    hipLaunchKernelGGL(CUDAHammingMean_kernel, dim3(((num_q - 1) >> 8) + 1), dim3(32, 8), 0, 0, tex_q, num_q, reinterpret_cast<const uint64_t*>(d_t), num_t, d_sums);
    hipDeviceSynchronize();
}
e836e92aae70073b7cd532c36b4701e09a45bbbe.cu
/*******************************************************************
*   CUDAHammingMean.cu
*   CUDAHammingMean
*
*   Author: Kareem Omar
*   kareem.omar@uah.edu
*   https://github.com/komrad36
*
*   Last updated Oct 8, 2016
*******************************************************************/

//
// Fastest GPU implementation of a brute-force
// Hamming-weight matrix for 512-bit binary descriptors.
//
// Yes, that means the DIFFERENCE in popcounts is used
// for thresholding, NOT the ratio. This is the CORRECT
// approach for binary descriptors.
//
// This laboriously crafted kernel is EXTREMELY fast.
// 43 BILLION comparisons per second on a stock GTX1080,
// enough to match nearly 38,000 descriptors per frame at 30 fps (!)
//
// A key insight responsible for much of the performance of
// this insanely fast CUDA kernel is due to
// Christopher Parker (https://github.com/csp256), to whom
// I am extremely grateful.
//
// CUDA CC 3.0 or higher is required.
//
// All functionality is contained in the files CUDAK2NN.h
// and CUDAK2NN.cu. 'main.cpp' is simply a sample test harness
// with example usage and performance testing.
//

#include "CUDAHammingMean.h"

__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDAHammingMean_kernel(const cudaTextureObject_t tex_q, const int num_q, const uint64_t* __restrict__ g_training, const int num_t, uint32_t* const __restrict__ g_sums) {
    uint64_t train = *(g_training += threadIdx.x & 7);
    g_training += 8;
    uint64_t q[8];
    for (int i = 0, offset = ((threadIdx.x & 24) << 3) + (threadIdx.x & 7) + (blockIdx.x << 11) + (threadIdx.y << 8); i < 8; ++i, offset += 8) {
        const uint2 buf = tex1Dfetch<uint2>(tex_q, offset);
        asm("mov.b64 %0, {%1,%2};" : "=l"(q[i]) : "r"(buf.x), "r"(buf.y)); // some assembly required
    }
    uint32_t total = 0U;
#pragma unroll 6
    for (int t = 0; t < num_t; ++t, g_training += 8) {
        uint32_t dist[4];
        for (int i = 0; i < 4; ++i) dist[i] = __byte_perm(__popcll(q[i] ^ train), __popcll(q[i + 4] ^ train), 0x5410);
        for (int i = 0; i < 4; ++i) dist[i] += __shfl_xor(dist[i], 1);
        train = *g_training;
        if (threadIdx.x & 1) dist[0] = dist[1];
        if (threadIdx.x & 1) dist[2] = dist[3];
        dist[0] += __shfl_xor(dist[0], 2);
        dist[2] += __shfl_xor(dist[2], 2);
        if (threadIdx.x & 2) dist[0] = dist[2];
        dist[0] += __shfl_xor(dist[0], 4);
        total += __byte_perm(dist[0], 0U, threadIdx.x & 4 ? 0x5432U : 0x5410U);
    }
    const int idx = (blockIdx.x << 8) + (threadIdx.y << 5) + threadIdx.x;
    if (idx < num_q) g_sums[idx] = total;
}

void CUDAHammingMean(const void* const __restrict d_t, const int num_t, const cudaTextureObject_t tex_q, const int num_q, uint32_t* const __restrict d_sums) {
    CUDAHammingMean_kernel<<<((num_q - 1) >> 8) + 1, { 32, 8 }>>>(tex_q, num_q, reinterpret_cast<const uint64_t*>(d_t), num_t, d_sums);
    cudaDeviceSynchronize();
}
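The __shfl_xor calls in this kernel implement a butterfly reduction: XOR-ing the lane id with masks 1, 2 and 4 pairs each lane with progressively more distant partners until the wanted partial sums coexist in the right lanes. The same pattern in isolation, using the __shfl_xor_sync spelling required by current CUDA (a standalone sketch, not from this file):

#include <stdio.h>
#include <cuda_runtime.h>

// After the loop every lane of the warp holds the sum 0 + 1 + ... + 31 = 496.
__global__ void warp_butterfly_sum(void) {
    int v = threadIdx.x;                               // each lane contributes its lane id
    for (int mask = 1; mask < 32; mask <<= 1)
        v += __shfl_xor_sync(0xffffffffu, v, mask);    // exchange with lane (id ^ mask)
    if (threadIdx.x == 0)
        printf("warp sum = %d\n", v);
}

int main(void) {
    warp_butterfly_sum<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}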
02ade06246e2a0a062d069982f57a09776037832.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2013-2014, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "convert/oskar_convert_ecef_to_station_uvw_cuda.h" #include "convert/private_convert_ecef_to_station_uvw_inline.h" #ifdef __cplusplus extern "C" { #endif /* Single precision. */ void oskar_convert_ecef_to_station_uvw_cuda_f(int num_stations, const float* d_x, const float* d_y, const float* d_z, float ha0_rad, float dec0_rad, float* d_u, float* d_v, float* d_w) { float sin_ha0, cos_ha0, sin_dec0, cos_dec0; /* Define block and grid sizes. */ int num_blocks, num_threads = 256; num_blocks = (num_stations + num_threads - 1) / num_threads; /* Precompute trig. */ sin_ha0 = (float)sin(ha0_rad); cos_ha0 = (float)cos(ha0_rad); sin_dec0 = (float)sin(dec0_rad); cos_dec0 = (float)cos(dec0_rad); oskar_convert_ecef_to_station_uvw_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_stations, d_x, d_y, d_z, sin_ha0, cos_ha0, sin_dec0, cos_dec0, d_u, d_v, d_w); } /* Double precision. */ void oskar_convert_ecef_to_station_uvw_cuda_d(int num_stations, const double* d_x, const double* d_y, const double* d_z, double ha0_rad, double dec0_rad, double* d_u, double* d_v, double* d_w) { double sin_ha0, cos_ha0, sin_dec0, cos_dec0; /* Define block and grid sizes. */ int num_blocks, num_threads = 256; num_blocks = (num_stations + num_threads - 1) / num_threads; /* Precompute trig. */ sin_ha0 = sin(ha0_rad); cos_ha0 = cos(ha0_rad); sin_dec0 = sin(dec0_rad); cos_dec0 = cos(dec0_rad); oskar_convert_ecef_to_station_uvw_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_stations, d_x, d_y, d_z, sin_ha0, cos_ha0, sin_dec0, cos_dec0, d_u, d_v, d_w); } /* Kernels. ================================================================ */ /* Single precision. 
*/ __global__ void oskar_convert_ecef_to_station_uvw_cudak_f(const int num_stations, const float* restrict x, const float* restrict y, const float* restrict z, const float sin_ha0, const float cos_ha0, const float sin_dec0, const float cos_dec0, float* restrict u, float* restrict v, float* restrict w) { /* Get station ID. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_stations) return; oskar_convert_ecef_to_station_uvw_inline_f(x[i], y[i], z[i], sin_ha0, cos_ha0, sin_dec0, cos_dec0, &u[i], &v[i], &w[i]); } /* Double precision. */ __global__ void oskar_convert_ecef_to_station_uvw_cudak_d(const int num_stations, const double* restrict x, const double* restrict y, const double* restrict z, const double sin_ha0, const double cos_ha0, const double sin_dec0, const double cos_dec0, double* restrict u, double* restrict v, double* restrict w) { /* Get station ID. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_stations) return; oskar_convert_ecef_to_station_uvw_inline_d(x[i], y[i], z[i], sin_ha0, cos_ha0, sin_dec0, cos_dec0, &u[i], &v[i], &w[i]); } #ifdef __cplusplus } #endif
02ade06246e2a0a062d069982f57a09776037832.cu
/* * Copyright (c) 2013-2014, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "convert/oskar_convert_ecef_to_station_uvw_cuda.h" #include "convert/private_convert_ecef_to_station_uvw_inline.h" #ifdef __cplusplus extern "C" { #endif /* Single precision. */ void oskar_convert_ecef_to_station_uvw_cuda_f(int num_stations, const float* d_x, const float* d_y, const float* d_z, float ha0_rad, float dec0_rad, float* d_u, float* d_v, float* d_w) { float sin_ha0, cos_ha0, sin_dec0, cos_dec0; /* Define block and grid sizes. */ int num_blocks, num_threads = 256; num_blocks = (num_stations + num_threads - 1) / num_threads; /* Precompute trig. */ sin_ha0 = (float)sin(ha0_rad); cos_ha0 = (float)cos(ha0_rad); sin_dec0 = (float)sin(dec0_rad); cos_dec0 = (float)cos(dec0_rad); oskar_convert_ecef_to_station_uvw_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_stations, d_x, d_y, d_z, sin_ha0, cos_ha0, sin_dec0, cos_dec0, d_u, d_v, d_w); } /* Double precision. */ void oskar_convert_ecef_to_station_uvw_cuda_d(int num_stations, const double* d_x, const double* d_y, const double* d_z, double ha0_rad, double dec0_rad, double* d_u, double* d_v, double* d_w) { double sin_ha0, cos_ha0, sin_dec0, cos_dec0; /* Define block and grid sizes. */ int num_blocks, num_threads = 256; num_blocks = (num_stations + num_threads - 1) / num_threads; /* Precompute trig. */ sin_ha0 = sin(ha0_rad); cos_ha0 = cos(ha0_rad); sin_dec0 = sin(dec0_rad); cos_dec0 = cos(dec0_rad); oskar_convert_ecef_to_station_uvw_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads) (num_stations, d_x, d_y, d_z, sin_ha0, cos_ha0, sin_dec0, cos_dec0, d_u, d_v, d_w); } /* Kernels. ================================================================ */ /* Single precision. */ __global__ void oskar_convert_ecef_to_station_uvw_cudak_f(const int num_stations, const float* restrict x, const float* restrict y, const float* restrict z, const float sin_ha0, const float cos_ha0, const float sin_dec0, const float cos_dec0, float* restrict u, float* restrict v, float* restrict w) { /* Get station ID. 
*/ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_stations) return; oskar_convert_ecef_to_station_uvw_inline_f(x[i], y[i], z[i], sin_ha0, cos_ha0, sin_dec0, cos_dec0, &u[i], &v[i], &w[i]); } /* Double precision. */ __global__ void oskar_convert_ecef_to_station_uvw_cudak_d(const int num_stations, const double* restrict x, const double* restrict y, const double* restrict z, const double sin_ha0, const double cos_ha0, const double sin_dec0, const double cos_dec0, double* restrict u, double* restrict v, double* restrict w) { /* Get station ID. */ const int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= num_stations) return; oskar_convert_ecef_to_station_uvw_inline_d(x[i], y[i], z[i], sin_ha0, cos_ha0, sin_dec0, cos_dec0, &u[i], &v[i], &w[i]); } #ifdef __cplusplus } #endif
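The launch-size computation in both wrappers above is plain ceiling division; as a worked example with illustrative numbers, 1000 stations at 256 threads per block give (1000 + 256 - 1) / 256 = 4 blocks, i.e. 1024 threads, and the i >= num_stations guard in each kernel retires the 24 surplus threads.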
08764a59630831f49e1f575ab23b6cc51a5323c0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

__global__ void cuda_hello(void) {
    printf("Hello, world!");
}

int main(void) {
    printf("Calling cuda_hello...\n");

    // call the CUDA kernel from the GPU
    hipLaunchKernelGGL(cuda_hello, dim3(1), dim3(1), 0, 0);

    // wait for the kernel to finish
    hipDeviceSynchronize();
    printf("...device synchronized.\n");

    return 0;
}
08764a59630831f49e1f575ab23b6cc51a5323c0.cu
#include <stdio.h>
#include <cuda.h>

__global__ void cuda_hello(void) {
    printf("Hello, world!");
}

int main(void) {
    printf("Calling cuda_hello...\n");

    // call the CUDA kernel from the GPU
    cuda_hello<<<1,1>>>();

    // wait for the kernel to finish
    cudaDeviceSynchronize();
    printf("...device synchronized.\n");

    return 0;
}
433708a3d274a0387ec39903d3577454ac4f70e6.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"

void sumMatrix2D_CPU(float * MatA, float * MatB, float * MatC, int nx, int ny)
{
    float * a = MatA;
    float * b = MatB;
    float * c = MatC;
    for(int j = 0; j < ny; j++)
    {
        for(int i = 0; i < nx; i++)
        {
            c[i] = a[i] + b[i];
        }
        c += nx;
        b += nx;
        a += nx;
    }
}

__global__ void sumMatrix(float * MatA, float * MatB, float * MatC, int nx, int ny)
{
    int ix = threadIdx.x + blockDim.x * blockIdx.x;
    int iy = threadIdx.y + blockDim.y * blockIdx.y;
    int idx = ix + iy * nx;   // row-major: each row holds nx entries
    if (ix < nx && iy < ny)
    {
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}

int main(int argc, char** argv)
{
    printf("starting...\n");
    initDevice(0);
    int nx = 1 << 12;
    int ny = 1 << 12;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);

    //Malloc
    float* A_host = (float*)malloc(nBytes);
    float* B_host = (float*)malloc(nBytes);
    float* C_host = (float*)malloc(nBytes);
    float* C_from_gpu = (float*)malloc(nBytes);
    initialData(A_host, nxy);
    initialData(B_host, nxy);

    //hipMalloc
    float *A_dev = NULL;
    float *B_dev = NULL;
    float *C_dev = NULL;
    CHECK(hipMalloc((void**)&A_dev, nBytes));
    CHECK(hipMalloc((void**)&B_dev, nBytes));
    CHECK(hipMalloc((void**)&C_dev, nBytes));

    CHECK(hipMemcpy(A_dev, A_host, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(B_dev, B_host, nBytes, hipMemcpyHostToDevice));

    int dimx = 32;
    int dimy = 32;

    // cpu compute
    hipMemcpy(C_from_gpu, C_dev, nBytes, hipMemcpyDeviceToHost);
    double iStart = cpuSecond();
    sumMatrix2D_CPU(A_host, B_host, C_host, nx, ny);
    double iElaps = cpuSecond() - iStart;
    printf("CPU Execution Time elapsed %f sec\n", iElaps);

    // 2d block and 2d grid
    dim3 block_0(dimx, dimy);
    dim3 grid_0((nx - 1) / block_0.x + 1, (ny - 1) / block_0.y + 1);
    iStart = cpuSecond();
    hipLaunchKernelGGL(sumMatrix, dim3(grid_0), dim3(block_0), 0, 0, A_dev, B_dev, C_dev, nx, ny);
    CHECK(hipDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("GPU Execution configuration<<<(%d,%d),(%d,%d)>>> Time elapsed %f sec\n",
           grid_0.x, grid_0.y, block_0.x, block_0.y, iElaps);
    CHECK(hipMemcpy(C_from_gpu, C_dev, nBytes, hipMemcpyDeviceToHost));
    checkResult(C_host, C_from_gpu, nxy);

    hipFree(A_dev);
    hipFree(B_dev);
    hipFree(C_dev);
    free(A_host);
    free(B_host);
    free(C_host);
    free(C_from_gpu);
    hipDeviceReset();
    return 0;
}
433708a3d274a0387ec39903d3577454ac4f70e6.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"

void sumMatrix2D_CPU(float * MatA, float * MatB, float * MatC, int nx, int ny)
{
    float * a = MatA;
    float * b = MatB;
    float * c = MatC;
    for(int j = 0; j < ny; j++)
    {
        for(int i = 0; i < nx; i++)
        {
            c[i] = a[i] + b[i];
        }
        c += nx;
        b += nx;
        a += nx;
    }
}

__global__ void sumMatrix(float * MatA, float * MatB, float * MatC, int nx, int ny)
{
    int ix = threadIdx.x + blockDim.x * blockIdx.x;
    int iy = threadIdx.y + blockDim.y * blockIdx.y;
    int idx = ix + iy * nx;   // row-major: each row holds nx entries
    if (ix < nx && iy < ny)
    {
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}

int main(int argc, char** argv)
{
    printf("starting...\n");
    initDevice(0);
    int nx = 1 << 12;
    int ny = 1 << 12;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);

    //Malloc
    float* A_host = (float*)malloc(nBytes);
    float* B_host = (float*)malloc(nBytes);
    float* C_host = (float*)malloc(nBytes);
    float* C_from_gpu = (float*)malloc(nBytes);
    initialData(A_host, nxy);
    initialData(B_host, nxy);

    //cudaMalloc
    float *A_dev = NULL;
    float *B_dev = NULL;
    float *C_dev = NULL;
    CHECK(cudaMalloc((void**)&A_dev, nBytes));
    CHECK(cudaMalloc((void**)&B_dev, nBytes));
    CHECK(cudaMalloc((void**)&C_dev, nBytes));

    CHECK(cudaMemcpy(A_dev, A_host, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(B_dev, B_host, nBytes, cudaMemcpyHostToDevice));

    int dimx = 32;
    int dimy = 32;

    // cpu compute
    cudaMemcpy(C_from_gpu, C_dev, nBytes, cudaMemcpyDeviceToHost);
    double iStart = cpuSecond();
    sumMatrix2D_CPU(A_host, B_host, C_host, nx, ny);
    double iElaps = cpuSecond() - iStart;
    printf("CPU Execution Time elapsed %f sec\n", iElaps);

    // 2d block and 2d grid
    dim3 block_0(dimx, dimy);
    dim3 grid_0((nx - 1) / block_0.x + 1, (ny - 1) / block_0.y + 1);
    iStart = cpuSecond();
    sumMatrix<<<grid_0, block_0>>>(A_dev, B_dev, C_dev, nx, ny);
    CHECK(cudaDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("GPU Execution configuration<<<(%d,%d),(%d,%d)>>> Time elapsed %f sec\n",
           grid_0.x, grid_0.y, block_0.x, block_0.y, iElaps);
    CHECK(cudaMemcpy(C_from_gpu, C_dev, nBytes, cudaMemcpyDeviceToHost));
    checkResult(C_host, C_from_gpu, nxy);

    cudaFree(A_dev);
    cudaFree(B_dev);
    cudaFree(C_dev);
    free(A_host);
    free(B_host);
    free(C_host);
    free(C_from_gpu);
    cudaDeviceReset();
    return 0;
}
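As a worked instance of the 2D indexing above: with blockDim = (32, 32), the thread at block (2, 3), thread (5, 7) computes ix = 5 + 32*2 = 69 and iy = 7 + 32*3 = 103, so with nx = 4096 it updates the flattened element iy*nx + ix = 103*4096 + 69 = 421957.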
d4d29efcfb5ef959511366a61b9432f43879f071.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <fstream>

using namespace std;

__global__ void chaotic(double *X, double *R, int y)
{
    int index = threadIdx.x + blockIdx.x * 1024;
    double sum = 0.0;
    double temp[48];
    if (X[index] >= 0.5) {
        for (int i = 1; i < y; i++) {
            // X[index] = X[index] * R[index] * (1 - X[index]);
        }
        // R[index] = 3.99;
    }
    else {
        for (int i = 1; i < y; i++) {
            X[index] = X[index] * R[index] * (1 - X[index]);
            temp[i] = 3.86 + (X[index] * 0.14);
            sum = sum + temp[i];
        }
        R[index] = sum / y;
    }
}

__global__ void postprocess(double* X, double* R, unsigned int* v)
{
    int index = threadIdx.x + blockIdx.x * 1024;
    unsigned int M1 = 0, M2 = 0, M3 = 0, M4 = 0;
    // reads X[index + 33], so stay within the 1024-element state array
    if (index + 33 < 1024) {
        unsigned long long* wsk;
        wsk = reinterpret_cast<long long unsigned*>(&X[index]);
        unsigned long long C1 = *wsk;
        wsk = reinterpret_cast<long long unsigned*>(&X[index + 33]);
        unsigned long long C2 = *wsk % 1024;
        unsigned int lsbmask = 0x80000000;
        for (int i = 0; i < 32; i++) {
            if ((lsbmask & C1) > 0) { M1 = M1 | lsbmask; }
            if ((lsbmask & C2) > 0) { M3 = M3 | lsbmask; }
            lsbmask = lsbmask >> 1;
        }
        lsbmask = 0x80000000;
        unsigned long long msbmask = 0x8000000000000000;
        for (int i = 0; i < 32; i++) {
            if ((msbmask & C1) > 0) { M2 = M2 | lsbmask; }
            if ((msbmask & C2) > 0) { M4 = M4 | lsbmask; }
            lsbmask = lsbmask >> 1;
            msbmask = msbmask >> 1;
        }
        v[index] = ((M1 + M4) ^ M3) + M2;
        v[index] = v[index] % 256;
    }
}

int main(void)
{
    srand(time(NULL));
    fstream plik;
    plik.open("plik.txt", ios::out);
    const int Beta = 1024;
    int size = Beta * sizeof(double);
    int sizev = Beta * sizeof(unsigned int);

    // host buffers
    double *X = (double *)malloc(size);
    double *R = (double *)malloc(size);
    unsigned int *v = (unsigned int*)malloc(sizev);

    // device buffers
    double *dX, *dR;
    unsigned int *dv;
    hipMalloc((void**)&dX, size);
    hipMalloc((void**)&dR, size);
    hipMalloc((void**)&dv, sizev);

    for (int i = 0; i < Beta; i++) {
        X[i] = (rand() % 1000) / 1000.0;
        while (X[i] < 0.01) { X[i] = (rand() % 1000) / 1000.0; }
        R[i] = 3.86 + (X[i] * 0.14);
        // plik << X[i] << " " << R[i] << endl;
    }

    hipMemcpy(dX, X, size, hipMemcpyHostToDevice);
    hipMemcpy(dR, R, size, hipMemcpyHostToDevice);
    hipMemset(dv, 0, sizev);

    // Beta = 1024 states: one block of 1024 threads covers them exactly
    hipLaunchKernelGGL(chaotic, dim3(1), dim3(1024), 0, 0, dX, dR, 48);
    hipLaunchKernelGGL(chaotic, dim3(1), dim3(1024), 0, 0, dX, dR, 2);
    hipLaunchKernelGGL(postprocess, dim3(1), dim3(1024), 0, 0, dX, dR, dv);

    hipMemcpy(X, dX, size, hipMemcpyDeviceToHost);
    hipMemcpy(R, dR, size, hipMemcpyDeviceToHost);
    hipMemcpy(v, dv, sizev, hipMemcpyDeviceToHost);

    for (int i = 0; i < Beta; i++) {
        plik << v[i] << endl;
        //plik << X[i] << " " << R[i] << endl;
    }

    hipFree(dX);
    hipFree(dR);
    hipFree(dv);
    free(X);
    free(R);
    free(v);
    return 0;
}
d4d29efcfb5ef959511366a61b9432f43879f071.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <cstdlib> #include <ctime> #include <fstream> using namespace std; __global__ void chaotic(double *X, double *R, int y) { int index = threadIdx.x + blockIdx.x * 1024; double sum = 0.0; double temp[48]; if (X[index] >= 0.5) { for (int i = 1; i < y; i++) { // X[index] = X[index] * R[index] * (1 - X[index]); } // R[index] = 3.99; } else { for (int i = 1; i < y; i++) { X[index] = X[index] * R[index] * (1 - X[index]); temp[i] = 3.86 + (X[index] * 0.14); sum = sum + temp[i]; } R[index] = sum / y; } } __global__ void postprocess(double* X, double* R, unsigned int* v) { int index = threadIdx.x + blockIdx.x * 1024; unsigned int M1 = 0, M2 = 0, M3 = 0, M4 = 0; if (blockIdx.x == 0) { unsigned long long* wsk; wsk = reinterpret_cast<long long unsigned*>(&X[index]); unsigned long long C1 = *wsk; wsk = reinterpret_cast<long long unsigned*>(&X[index + 33]); unsigned long long C2 = *wsk % 1024; unsigned int lsbmask = 0x80000000; for (int i = 0; i < 32; i++) { if ((lsbmask & C1) > 0) { M1 = M1 | lsbmask; } if ((lsbmask & C2) > 0) { M3 = M3 | lsbmask; } lsbmask = lsbmask >> 1; } lsbmask = 0x80000000; unsigned long long msbmask = 0x8000000000000000; for (int i = 0; i < 32; i++) { if ((msbmask & C1) > 0) { M2 = M2 | lsbmask; } if ((msbmask & C2) > 0) { M4 = M4 | lsbmask; } lsbmask = lsbmask >> 1; msbmask = msbmask >> 1; } v[index] = ((M1 + M4) ^ M3) + M2; v[index] = v[index] % 256; } } int main(void) { srand(time(NULL)); fstream plik; plik.open("plik.txt", ios::out); const int Beta = 1024; double *X = new double[Beta]; double *R = new double[Beta]; unsigned int *v = new unsigned int[Beta]; double* dX = new double[Beta]; double* dR = new double[Beta]; unsigned int* dv = new unsigned int[Beta]; int size = Beta * sizeof(double); int sizev = Beta * sizeof(unsigned int); cudaMalloc((void**)&dX, size); cudaMalloc((void**)&dR, size); cudaMalloc((void**)&dv, sizev); X = (double *)malloc(size); R = (double *)malloc(size); v = (unsigned int*)malloc(sizev); for (int i = 0; i < Beta; i++) { X[i] = (rand() % 1000) / 1000.0; while (X[i] < 0.01) { X[i] = (rand() % 1000) / 1000.0; } R[i] = 3.86 + (X[i] * 0.14); // plik << X[i] << " " <<R[i]<< endl; } cudaMemcpy(dX, X, size, cudaMemcpyHostToDevice); cudaMemcpy(dR, R, size, cudaMemcpyHostToDevice); chaotic <<< 32, 1024 >>> (dX, dR, 48); chaotic <<< 32, 1024 >>> (dX, dR, 2); postprocess <<< 32, 1024 >>> (dX, dR, dv); cudaMemcpy(X, dX, size, cudaMemcpyDeviceToHost); cudaMemcpy(R, dR, size, cudaMemcpyDeviceToHost); cudaMemcpy(v, dv, sizev, cudaMemcpyDeviceToHost); for (int i = 0; i < Beta; i++) { plik << v[i] << endl; //plik << X[i] << " " << R[i] << endl; } return 0; }
78cd3c040820529c417d01d4f5dd719bd90f1b6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHReduceApplyUtils.cuh> #include <ATen/hip/cub.cuh> #include <ATen/native/hip/EmbeddingBackwardKernel.cuh> #include <ATen/native/hip/SortingCommon.cuh> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int BLOCKDIMY = 16; #else static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void embedding_backward_feature_kernel (index_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t, typename index_t> __global__ void embedding_backward_kernel( index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight, index_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? 
(accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void renorm_kernel( scalar_t* weights, index_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += ::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count); Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt}); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (num_indices <= 3072 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE)); dim3 block(C10_WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t>) , dim3(grid), dim3(block), sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY, stream, indices_contig.data_ptr<index_t>(), 
grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { auto range = at::arange(num_indices, indices.options()); int64_t nbits = cuda::cub::get_num_bits(num_weights); cuda::cub::sort_pairs( indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(), range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(), num_indices, false, 0, nbits); if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count); } }); return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () { auto num_indices = indices.numel(); auto indices_contig = std::get<0>(indices.sort()).contiguous(); auto unique_indices = at::empty(indices.numel(), indices.options()); auto num_unique_indices = at::empty({}, indices.options().dtype(kLong)); cuda::cub::unique( indices_contig.data_ptr<index_t>(), unique_indices.data_ptr<index_t>(), num_unique_indices.data_ptr<int64_t>(), num_indices ); dim3 grid = num_unique_indices.item<int64_t>(); dim3 block = 128; int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream, self.data_ptr<scalar_t>(), unique_indices.data_ptr<index_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return self; } }} // namespace at::native
78cd3c040820529c417d01d4f5dd719bd90f1b6f.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #include <c10/macros/Macros.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCReduceApplyUtils.cuh> #include <ATen/cuda/cub.cuh> #include <ATen/native/cuda/EmbeddingBackwardKernel.cuh> #include <ATen/native/cuda/SortingCommon.cuh> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int BLOCKDIMY = 16; #else static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void embedding_backward_feature_kernel (index_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t, typename index_t> __global__ void embedding_backward_kernel( index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight, index_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. // // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? 
(accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t, typename index_t> __global__ void renorm_kernel( scalar_t* weights, index_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if (norm_type == 2) { v += x * x; } else { v += std::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace template<typename index_t> void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count); Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt}); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (num_indices <= 3072 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE)); dim3 block(C10_WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t> <<<grid, block, sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY, stream>>> (indices_contig.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), grad_weight.data_ptr<scalar_t>(), 
static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor count; AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () { auto range = at::arange(num_indices, indices.options()); int64_t nbits = cuda::cub::get_num_bits(num_weights); cuda::cub::sort_pairs( indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(), range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(), num_indices, false, 0, nbits); if (scale_grad_by_freq) { count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count); } }); return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () { auto num_indices = indices.numel(); auto indices_contig = std::get<0>(indices.sort()).contiguous(); auto unique_indices = at::empty(indices.numel(), indices.options()); auto num_unique_indices = at::empty({}, indices.options().dtype(kLong)); cuda::cub::unique( indices_contig.data_ptr<index_t>(), unique_indices.data_ptr<index_t>(), num_unique_indices.data_ptr<int64_t>(), num_indices ); dim3 grid = num_unique_indices.item<int64_t>(); dim3 block = 128; int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>( self.data_ptr<scalar_t>(), unique_indices.data_ptr<index_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return self; } }} // namespace at::native
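The determinism machinery in embedding_backward_feature_kernel — a ballot over lanes whose target row matches, __ffs to elect the lowest matching lane as leader, then serialized accumulation by that leader — can be seen in isolation. A standalone sketch using the modern __match_any_sync primitive (compute capability 7.0+), which directly returns the mask of lanes holding equal values; this illustrates the pattern only and is not PyTorch code:

#include <stdio.h>
#include <cuda_runtime.h>

// Lanes with equal keys form a group; the lowest lane of each group is
// elected leader and reports the group size.
__global__ void elect_leaders(const int *keys) {
    int lane = threadIdx.x;
    unsigned peers = __match_any_sync(0xffffffffu, keys[lane]); // lanes sharing my key
    int leader = __ffs(peers) - 1;                              // lowest set bit = leader
    if (lane == leader)
        printf("lane %2d leads %d lanes with key %d\n", lane, __popc(peers), keys[lane]);
}

int main(void) {
    int h_keys[32];
    for (int i = 0; i < 32; ++i) h_keys[i] = i % 4;   // four groups of eight lanes
    int *d_keys;
    cudaMalloc(&d_keys, sizeof h_keys);
    cudaMemcpy(d_keys, h_keys, sizeof h_keys, cudaMemcpyHostToDevice);
    elect_leaders<<<1, 32>>>(d_keys);
    cudaDeviceSynchronize();
    cudaFree(d_keys);
    return 0;
}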
761f8880aeb366a421a4b925913d0e9727244ffe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pfem2particle.h" #include <iostream> #include <fstream> #include <deal.II/fe/fe.h> #include <deal.II/fe/fe_q.h> #include <deal.II/fe/fe_system.h> #include <deal.II/fe/fe_values.h> #include <deal.II/fe/fe_nothing.h> #include <deal.II/fe/fe_poly.h> #include <deal.II/grid/grid_tools.h> #include <deal.II/particles/particle_iterator.h> #include <deal.II/particles/particle_handler.h> #include <deal.II/dofs/dof_handler.h> #include <deal.II/dofs/dof_accessor.h> #include <deal.II/lac/vector.h> #include <deal.II/numerics/vector_tools.h> #include <deal.II/base/quadrature_lib.h> #include <deal.II/base/geometry_info.h> #include <deal.II/base/function.h> #include <deal.II/base/tensor.h> #include <deal.II/grid/tria_accessor.h> #include <deal.II/lac/precondition.h> #include <deal.II/lac/solver_gmres.h> #include "omp.h" using namespace dealii; pfem2Particle::pfem2Particle(const Point<2> & location,const Point<2> & reference_location,const unsigned id) : id (id) { this->location[0] = location[0]; this->location[1] = location[1]; this->reference_location[0] = reference_location[0]; this->reference_location[1] = reference_location[1]; this->velocity[0] = 0.0; this->velocity[1] = 0.0; } pfem2Particle::pfem2Particle() : pfem2Particle(Point<2>(), Point<2>(), 0) { } void pfem2Particle::set_location (const Point<2> &new_location) { location[0] = new_location[0]; location[1] = new_location[1]; } const Point<2> pfem2Particle::get_location () const { return Point<2>(location[0], location[1]); } void pfem2Particle::set_reference_location (const Point<2> &new_reference_location) { reference_location[0] = new_reference_location[0]; reference_location[1] = new_reference_location[1]; } const Point<2> pfem2Particle::get_reference_location () const { return Point<2>(reference_location[0], reference_location[1]); } unsigned int pfem2Particle::get_id () const { return id; } void pfem2Particle::set_cell_dofs(const typename DoFHandler<2>::active_cell_iterator &cell) { for(unsigned int i = 0; i < GeometryInfo<2>::vertices_per_cell; ++i) cell_dofs[i] = cell->vertex_dof_index(i, 0); } void pfem2Particle::set_tria_position(const int &new_position) { tria_position = new_position; } int pfem2Particle::get_tria_position() const { return tria_position; } const Tensor<1,2> pfem2Particle::get_velocity() const { return Tensor<1,2>({velocity[0],velocity[1]}); } const Tensor<1,2> pfem2Particle::get_velocity_ext() const { return Tensor<1,2>({velocity_ext[0],velocity_ext[1]}); } double pfem2Particle::get_velocity_component(int component) const { return velocity[component]; } void pfem2Particle::set_velocity (const Tensor<1,2> &new_velocity) { velocity[0] = new_velocity[0]; velocity[1] = new_velocity[1]; } void pfem2Particle::set_velocity_component (const double value, int component) { velocity[component] = value; } void pfem2Particle::set_velocity_ext (const Tensor<1,2> &new_ext_velocity) { velocity_ext[0] = new_ext_velocity[0]; velocity_ext[1] = new_ext_velocity[1]; } Triangulation<2>::cell_iterator pfem2Particle::get_surrounding_cell(const Triangulation<2> &triangulation) const { const typename Triangulation<2>::cell_iterator cell(&triangulation, triangulation.n_levels() - 1, tria_position); return cell; } DoFHandler<2>::cell_iterator pfem2Particle::get_surrounding_cell(const Triangulation<2> &triangulation, const DoFHandler<2> &dof_handler) const { const typename DoFHandler<2>::cell_iterator cell(&triangulation, triangulation.n_levels() - 1, 
tria_position, &dof_handler); return cell; } unsigned int pfem2Particle::find_closest_vertex_of_cell(const typename Triangulation<2>::active_cell_iterator &cell, const Mapping<2> &mapping) { //transformation of local particle coordinates transformation is required as the global particle coordinates have already been updated by the time this function is called const Point<2> old_position = mapping.transform_unit_to_real_cell(cell, get_reference_location()); Tensor<1,2> velocity_normalized = get_velocity_ext() / get_velocity_ext().norm(); Tensor<1,2> particle_to_vertex = cell->vertex(0) - old_position; particle_to_vertex /= particle_to_vertex.norm(); double maximum_angle = velocity_normalized * particle_to_vertex; unsigned int closest_vertex = 0; for (unsigned int v = 1; v < GeometryInfo<2>::vertices_per_cell; ++v){ particle_to_vertex = cell->vertex(v) - old_position; particle_to_vertex /= particle_to_vertex.norm(); const double v_angle = velocity_normalized * particle_to_vertex; if (v_angle > maximum_angle){ closest_vertex = v; maximum_angle = v_angle; } } return closest_vertex; } std::size_t pfem2Particle::serialized_size_in_bytes() const { std::size_t size = sizeof(id) + sizeof(location) + sizeof(reference_location) + sizeof(tria_position) + sizeof(velocity) + sizeof(velocity_ext); return size; } pfem2ParticleHandler::pfem2ParticleHandler(const parallel::distributed::Triangulation<2> &tria, const Mapping<2> &coordMapping) : triangulation(&tria, typeid(*this).name()) , mapping(&coordMapping, typeid(*this).name()) , particles() {} pfem2ParticleHandler::~pfem2ParticleHandler() { clear_particles(); } void pfem2ParticleHandler::initialize_maps() { vertex_to_cells = std::vector<std::set<typename Triangulation<2>::active_cell_iterator>>(GridTools::vertex_to_cell_map(*triangulation)); vertex_to_cell_centers = std::vector<std::vector<Tensor<1,2>>>(GridTools::vertex_to_cell_centers_directions(*triangulation,vertex_to_cells)); } void pfem2ParticleHandler::clear_particles() { particles.clear(); } std::vector<pfem2Particle>::iterator pfem2ParticleHandler::remove_particle(std::vector<pfem2Particle>::iterator particleIndex) { return particles.erase(particleIndex); } void pfem2ParticleHandler::insert_particle(pfem2Particle &particle, const typename DoFHandler<2>::active_cell_iterator &cell) { particle.set_tria_position(cell->index()); particle.set_cell_dofs(cell); particles.push_back(particle); } unsigned int pfem2ParticleHandler::n_global_particles() const { return particles.size(); } bool compare_particle_association(const unsigned int a, const unsigned int b, const Tensor<1,2> &particle_direction, const std::vector<Tensor<1,2> > &center_directions) { const double scalar_product_a = center_directions[a] * particle_direction; const double scalar_product_b = center_directions[b] * particle_direction; return scalar_product_a > scalar_product_b; } void pfem2ParticleHandler::sort_particles_into_subdomains_and_cells(const DoFHandler<2> &dof_handler) { //std::cout << "Sorting particles" << std::endl; for(auto it = begin(); it != end(); ){ const typename Triangulation<2>::cell_iterator cell = (*it).get_surrounding_cell(*triangulation); bool found_cell = false; try{ const Point<2> p_unit = mapping->transform_real_to_unit_cell(cell, (*it).get_location()); if(GeometryInfo<2>::is_inside_unit_cell(p_unit)){ (*it).set_reference_location(p_unit); found_cell = true; ++it; } } catch(typename Mapping<2>::ExcTransformationFailed &){ #ifdef VERBOSE_OUTPUT std::cout << "Transformation failed for particle with global 
coordinates " << (*it).get_location() << " (checked cell index #" << cell->index() << ")" << std::endl; #endif // VERBOSE_OUTPUT } if(!found_cell){ std::vector<unsigned int> neighbor_permutation; Point<2> current_reference_position; typename Triangulation<2>::active_cell_iterator current_cell = (*it).get_surrounding_cell(*triangulation); const unsigned int closest_vertex = (*it).find_closest_vertex_of_cell(current_cell, *mapping); Tensor<1,2> vertex_to_particle = (*it).get_location() - current_cell->vertex(closest_vertex); vertex_to_particle /= vertex_to_particle.norm(); const unsigned int closest_vertex_index = current_cell->vertex_index(closest_vertex); const unsigned int n_neighbor_cells = vertex_to_cells[closest_vertex_index].size(); neighbor_permutation.resize(n_neighbor_cells); for (unsigned int i=0; i<n_neighbor_cells; ++i) neighbor_permutation[i] = i; std::sort(neighbor_permutation.begin(), neighbor_permutation.end(), std::bind(&compare_particle_association, std::placeholders::_1, std::placeholders::_2, std::cref(vertex_to_particle), std::cref(vertex_to_cell_centers[closest_vertex_index]))); for (unsigned int i=0; i<n_neighbor_cells; ++i){ typename std::set<typename Triangulation<2>::active_cell_iterator>::const_iterator cell = vertex_to_cells[closest_vertex_index].begin(); std::advance(cell, neighbor_permutation[i]); try{ const Point<2> p_unit = mapping->transform_real_to_unit_cell(*cell, (*it).get_location()); if (GeometryInfo<2>::is_inside_unit_cell(p_unit)){ current_cell = *cell; (*it).set_reference_location(p_unit); (*it).set_tria_position(current_cell->index()); const typename DoFHandler<2>::cell_iterator dofCell(triangulation, triangulation->n_levels() - 1, current_cell->index(), &dof_handler); (*it).set_cell_dofs(dofCell); found_cell = true; break; } } catch(typename Mapping<2>::ExcTransformationFailed &) { } } if (!found_cell){ *it = std::move(particles.back()); particles.pop_back(); } else ++it; } } } std::vector<pfem2Particle>::iterator pfem2ParticleHandler::begin() { return particles.begin(); } std::vector<pfem2Particle>::iterator pfem2ParticleHandler::end() { return particles.end(); } pfem2Solver::pfem2Solver() : mpi_communicator (MPI_COMM_WORLD), tria(mpi_communicator,Triangulation<2>::maximum_smoothing), particle_handler(tria, mapping), feV (1), feP (1), fe(FE_Q<2>(1), 1), dof_handlerV (tria), dof_handlerP (tria), quadrature_formula(2), face_quadrature_formula(2), feV_values (feV, quadrature_formula, update_values | update_gradients | update_quadrature_points | update_JxW_values), feP_values (feP, quadrature_formula, update_values | update_gradients | update_quadrature_points | update_JxW_values), feV_face_values (feV, face_quadrature_formula, update_values | update_quadrature_points | update_gradients | update_normal_vectors | update_JxW_values), feP_face_values (feP, face_quadrature_formula, update_values | update_quadrature_points | update_gradients | update_normal_vectors | update_JxW_values), dofs_per_cellV (feV.dofs_per_cell), dofs_per_cellP (feP.dofs_per_cell), local_dof_indicesV (dofs_per_cellV), local_dof_indicesP (dofs_per_cellP), n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)), this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)), pcout (std::cout,(this_mpi_process == 0)), n_q_points (quadrature_formula.size()), n_face_q_points (face_quadrature_formula.size()), quantities({0,0}) { setCudaConstants(); } pfem2Solver::~pfem2Solver() { } void pfem2Solver::seed_particles_into_cell (const typename DoFHandler<2>::cell_iterator 
&cell)
{
	double hx = 1.0/quantities[0];
	double hy = 1.0/quantities[1];

	double shapeValue;

	for(unsigned int i = 0; i < quantities[0]; ++i){
		for(unsigned int j = 0; j < quantities[1]; ++j){
			pfem2Particle particle(mapping.transform_unit_to_real_cell(cell, Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy)), Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy), ++particleCount);

			for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex){
				shapeValue = fe.shape_value(vertex, particle.get_reference_location());
				particle.set_velocity_component(particle.get_velocity_component(0) + shapeValue * locally_relevant_solutionVx(cell->vertex_dof_index(vertex, 0)), 0);
				particle.set_velocity_component(particle.get_velocity_component(1) + shapeValue * locally_relevant_solutionVy(cell->vertex_dof_index(vertex, 0)), 1);
			}//vertex

			particle_handler.insert_particle(particle, cell);
		}
	}
}

bool pfem2Solver::check_cells_for_empty_parts ()
{
	bool res = false;

	std::map<int, std::map<std::vector<unsigned int>, unsigned int>> particlesInCellParts;
	std::vector<std::vector<pfem2Particle>::iterator> particles_to_be_deleted;

	//determine which parts of the cell the particles lie in
	double hx = 1.0/quantities[0];
	double hy = 1.0/quantities[1];

	unsigned int num_x, num_y;

	for(auto particleIndex = particle_handler.begin(); particleIndex != particle_handler.end(); ){
		num_x = (*particleIndex).get_reference_location()(0)/hx;
		num_y = (*particleIndex).get_reference_location()(1)/hy;

		if(particlesInCellParts[(*particleIndex).get_tria_position()][{num_x,num_y}] > MAX_PARTICLES_PER_CELL_PART){
			*particleIndex = std::move(particle_handler.particles.back());
			particle_handler.particles.pop_back();
			res = true;
		} else {
			particlesInCellParts[(*particleIndex).get_tria_position()][{num_x,num_y}]++;
			++particleIndex;
		}
	}

	double shapeValue;

	//check the number of particles in each cell part: if it is 0, seed 1 particle at its center
	for(auto cellInfo = particlesInCellParts.begin(); cellInfo != particlesInCellParts.end(); ++cellInfo){
		const DoFHandler<2>::cell_iterator cell(&tria, tria.n_levels() - 1, (*cellInfo).first, &dof_handlerV);

		for(unsigned int i = 0; i < quantities[0]; i++)
			for(unsigned int j = 0; j < quantities[1]; j++)
				if((*cellInfo).second[{i,j}] == 0){
					pfem2Particle particle(mapping.transform_unit_to_real_cell(cell, Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy)), Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy), ++particleCount);

					for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex){
						shapeValue = fe.shape_value(vertex, particle.get_reference_location());
						particle.set_velocity_component(particle.get_velocity_component(0) + shapeValue * locally_relevant_solutionVx(cell->vertex_dof_index(vertex, 0)), 0);
						particle.set_velocity_component(particle.get_velocity_component(1) + shapeValue * locally_relevant_solutionVy(cell->vertex_dof_index(vertex, 0)), 1);
					}//vertex

					particle_handler.insert_particle(particle, cell);
					res = true;
				}
	}

	//removal of excess particles
	//for(std::vector<std::vector<pfem2Particle>::iterator>::reverse_iterator it = particles_to_be_deleted.rbegin(); it != particles_to_be_deleted.rend(); ++it) particle_handler.remove_particle(*it);
	//if(!particles_to_be_deleted.empty()) res = true;

	return res;
}

void pfem2Solver::seed_particles(const std::vector < unsigned int > & quantities)
{
	TimerOutput::Scope timer_section(*timer, "Particles' seeding");

	if(quantities.size() < 2){ return; }

	this->quantities = quantities;

	typename DoFHandler<2>::cell_iterator cell = dof_handlerV.begin(tria.n_levels()-1), endc = dof_handlerV.end(tria.n_levels()-1);
	for (; cell != endc; ++cell)
		if (cell->is_locally_owned())
			seed_particles_into_cell(cell);

	std::cout << "Created and placed " << particleCount << " particles" << std::endl;
	std::cout << "Particle handler contains " << particle_handler.n_global_particles() << " particles" << std::endl;
}

__global__ void correct_particle_velocities_cuda (const unsigned int Numparticles, pfem2Particle *particles, const double *deltaVx, const double *deltaVy)
{
	int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);

	if (i < Numparticles){
		double shapeValue;
		double vel_in_partVx = 0.0;
		double vel_in_partVy = 0.0;

		double *velocity = (double*)((char*)&particles[0] + i * particleSize + velocityPos);
		double *refLocation = (double*)((char*)&particles[0] + i * particleSize + refLocationPos);
		int *cellDofs = (int*)((char*)&particles[0] + i * particleSize + cellDoFsPos);

		shapeValue = (1 - refLocation[0]) * (1 - refLocation[1]);
		vel_in_partVx += shapeValue * deltaVx[cellDofs[0]];
		vel_in_partVy += shapeValue * deltaVy[cellDofs[0]];

		shapeValue = refLocation[0] * (1 - refLocation[1]);
		vel_in_partVx += shapeValue * deltaVx[cellDofs[1]];
		vel_in_partVy += shapeValue * deltaVy[cellDofs[1]];

		shapeValue = (1 - refLocation[0]) * refLocation[1];
		vel_in_partVx += shapeValue * deltaVx[cellDofs[2]];
		vel_in_partVy += shapeValue * deltaVy[cellDofs[2]];

		shapeValue = refLocation[0] * refLocation[1];
		vel_in_partVx += shapeValue * deltaVx[cellDofs[3]];
		vel_in_partVy += shapeValue * deltaVy[cellDofs[3]];

		//velocity already points into the particle record, so the in-place update stores the corrected value
		velocity[0] += vel_in_partVx;
		velocity[1] += vel_in_partVy;
	}
}

void pfem2Solver::correct_particles_velocities()
{
	TimerOutput::Scope timer_section(*timer, "Particles' velocities correction");

	hipError_t err = hipSuccess;

	//Mesh node velocity difference vectors
	size_t meshVectorSize = dof_handlerV.n_dofs() * sizeof(double);

	//Vx
	double *hostDeltaVx = (double *)malloc(meshVectorSize);
	if (hostDeltaVx == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); }

	for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostDeltaVx[i] = locally_relevant_solutionVx[i] - locally_relevant_old_solutionVx[i];

	double *deviceDeltaVx = NULL;
	err = hipMalloc((void **)&deviceDeltaVx, meshVectorSize);
	if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector deviceDeltaVx (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	err = hipMemcpy(deviceDeltaVx, hostDeltaVx, meshVectorSize, hipMemcpyHostToDevice);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostDeltaVx from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	//Vy
	double *hostDeltaVy = (double *)malloc(meshVectorSize);
	if (hostDeltaVy == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); }

	for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostDeltaVy[i] = locally_relevant_solutionVy[i] - locally_relevant_old_solutionVy[i];

	double *deviceDeltaVy = NULL;
	err = hipMalloc((void **)&deviceDeltaVy, meshVectorSize);
	if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceDeltaVy (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	err = hipMemcpy(deviceDeltaVy, hostDeltaVy, meshVectorSize, hipMemcpyHostToDevice);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostDeltaVy from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	dim3 grid_dim, block_dim;
	constexpr unsigned int particles_per_block = CUDAWrappers::warp_size;
	const double apply_n_blocks = ::ceil(static_cast<double>(particle_handler.n_global_particles()) / static_cast<double>(particles_per_block));
	const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks));
	const unsigned int apply_y_n_blocks = ::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks));

	grid_dim = dim3(apply_x_n_blocks, apply_y_n_blocks);
	block_dim = dim3(particles_per_block);

	pfem2Particle *deviceParticles = NULL;
	err = hipMalloc((void **)&deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle));
	if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceParticles (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	err = hipMemcpy(deviceParticles, particle_handler.particles.data(), particle_handler.n_global_particles() * sizeof(pfem2Particle), hipMemcpyHostToDevice);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostParticles from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	hipLaunchKernelGGL(( correct_particle_velocities_cuda), dim3(grid_dim), dim3(block_dim), 0, 0, particle_handler.n_global_particles(), deviceParticles, deviceDeltaVx, deviceDeltaVy);
	AssertCudaKernel ();

	err = hipGetLastError();
	if (err != hipSuccess){ fprintf(stderr, "Failed to launch correct_particle_velocities_cuda kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	timer->enter_subsection("Memory copy");
	err = hipMemcpy(particle_handler.particles.data(), deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle), hipMemcpyDeviceToHost);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector deviceParticles from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }
	timer->leave_subsection();

	hipFree(deviceParticles);
	hipFree(deviceDeltaVx);
	hipFree(deviceDeltaVy);

	free(hostDeltaVx);
	free(hostDeltaVy);

	//std::cout << "Finished correcting particles' velocities" << std::endl;
}

__global__ void move_particles_cuda ( const unsigned int Numparticles, const double time_step, pfem2Particle *particles, const double *solutionVx, const double *solutionVy)
{
	int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);

	if (i < Numparticles){
		double shapeValue;
		double vel_in_partVx = 0.0;
		double vel_in_partVy = 0.0;

		double *location = (double*)((char*)&particles[0] + i * particleSize + locationPos);
		double *refLocation = (double*)((char*)&particles[0] + i * particleSize + refLocationPos);
		int *cellDofs = (int*)((char*)&particles[0] + i * particleSize + cellDoFsPos);

		//printf("Particle %d has coordinates (%f, %f)\n", i, location[0], location[1]);
		//printf("Particle %d has local coordinates (%f, %f)\n", i, refLocation[0], refLocation[1]);
		//printf("Particle %d has cell with dofs (%d, %d, %d, %d)\n", i, cellDofs[0], cellDofs[1], cellDofs[2], cellDofs[3]);

		shapeValue = (1 - refLocation[0]) * (1 - refLocation[1]);
		vel_in_partVx += shapeValue * solutionVx[cellDofs[0]];
		vel_in_partVy += shapeValue * solutionVy[cellDofs[0]];

		shapeValue = refLocation[0] * (1 - refLocation[1]);
		vel_in_partVx += shapeValue * solutionVx[cellDofs[1]];
		vel_in_partVy += shapeValue * solutionVy[cellDofs[1]];

		shapeValue = (1 - refLocation[0]) * refLocation[1];
		vel_in_partVx += shapeValue * solutionVx[cellDofs[2]];
		vel_in_partVy += shapeValue * solutionVy[cellDofs[2]];

		shapeValue = refLocation[0] * refLocation[1];
		vel_in_partVx += shapeValue * solutionVx[cellDofs[3]];
		vel_in_partVy += shapeValue * solutionVy[cellDofs[3]];

		//location already points into the particle record, so the in-place update stores the new position
		location[0] += time_step * vel_in_partVx;
		location[1] += time_step * vel_in_partVy;

		*((double*)((char*)&particles[0] + i * particleSize + velocityExtPos)) = vel_in_partVx;
		*((double*)((char*)&particles[0] + i * particleSize + velocityExtPos + sizeof(double))) = vel_in_partVy;
	}
}

void pfem2Solver::move_particles() //particle advection
{
	TimerOutput::Scope timer_section(*timer, "Particles' movement");

	double min_time_step = time_step / PARTICLES_MOVEMENT_STEPS;

	hipError_t err = hipSuccess;

	//Mesh node velocity vectors
	size_t meshVectorSize = dof_handlerV.n_dofs() * sizeof(double);

	//Vx
	double *hostSolutionVx = (double *)malloc(meshVectorSize);
	if (hostSolutionVx == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); }

	for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostSolutionVx[i] = locally_relevant_solutionVx[i];

	double *deviceSolutionVx = NULL;
	err = hipMalloc((void **)&deviceSolutionVx, meshVectorSize);
	if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector deviceSolutionVx (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	err = hipMemcpy(deviceSolutionVx, hostSolutionVx, meshVectorSize, hipMemcpyHostToDevice);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostSolutionVx from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	//Vy
	double *hostSolutionVy = (double *)malloc(meshVectorSize);
	if (hostSolutionVy == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); }

	for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostSolutionVy[i] = locally_relevant_solutionVy[i];

	double *deviceSolutionVy = NULL;
	err = hipMalloc((void **)&deviceSolutionVy, meshVectorSize);
	if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceSolutionVy (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	err = hipMemcpy(deviceSolutionVy, hostSolutionVy, meshVectorSize, hipMemcpyHostToDevice);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostSolutionVy from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	for (int np_m = 0; np_m < PARTICLES_MOVEMENT_STEPS; ++np_m) {
		timer->enter_subsection("Variables preparation");
		dim3 grid_dim, block_dim;
		constexpr unsigned int particles_per_block = CUDAWrappers::warp_size;
		const double apply_n_blocks = ::ceil(static_cast<double>(particle_handler.n_global_particles()) / static_cast<double>(particles_per_block));
		const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks));
		const unsigned int apply_y_n_blocks = ::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks));

		grid_dim = dim3(apply_x_n_blocks, apply_y_n_blocks);
		block_dim = dim3(particles_per_block);

		pfem2Particle *deviceParticles = NULL;
		err = hipMalloc((void **)&deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle));
		if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceParticles (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

		err = hipMemcpy(deviceParticles, particle_handler.particles.data(), particle_handler.n_global_particles() * sizeof(pfem2Particle), hipMemcpyHostToDevice);
		if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostParticles from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }
		timer->leave_subsection();

		timer->enter_subsection("CUDA step");
		hipLaunchKernelGGL(( move_particles_cuda), dim3(grid_dim), dim3(block_dim), 0, 0, particle_handler.n_global_particles(), min_time_step, deviceParticles, deviceSolutionVx, deviceSolutionVy);
		AssertCudaKernel ();

		err = hipGetLastError();
		if (err != hipSuccess){ fprintf(stderr, "Failed to launch move_particles_cuda kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }
		timer->leave_subsection();

		timer->enter_subsection("Memory copy");
		err = hipMemcpy(particle_handler.particles.data(), deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle), hipMemcpyDeviceToHost);
		if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector deviceParticles from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }
		timer->leave_subsection();

		timer->enter_subsection("Particles' sorting");
		particle_handler.sort_particles_into_subdomains_and_cells(dof_handlerV);
		timer->leave_subsection();

		timer->enter_subsection("Memory release");
		hipFree(deviceParticles);
		timer->leave_subsection();
	}//np_m

	//check for empty cells (without particles) and seed particles into them
	timer->enter_subsection("Checking cells for empty parts");
	check_cells_for_empty_parts();
	timer->leave_subsection();

	//std::cout << "Finished moving particles" << std::endl;

	hipFree(deviceSolutionVx);
	hipFree(deviceSolutionVy);

	free(hostSolutionVx);
	free(hostSolutionVy);
}

__global__ void distribute_particle_velocities_cuda ( const unsigned int Numparticles, const pfem2Particle *particles, double *solutionVx, double *solutionVy, double *nodeWeights)
{
	int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);

	if (i < Numparticles){
		double shapeValue;

		double *velocity = (double*)((char*)&particles[0] + i * particleSize + velocityPos);
		double *refLocation = (double*)((char*)&particles[0] + i * particleSize + refLocationPos);
		int *cellDofs = (int*)((char*)&particles[0] + i * particleSize + cellDoFsPos);

		shapeValue = (1 - refLocation[0]) * (1 - refLocation[1]);
		atomicAdd(&solutionVx[cellDofs[0]], shapeValue * velocity[0]);
		atomicAdd(&solutionVy[cellDofs[0]], shapeValue * velocity[1]);
		atomicAdd(&nodeWeights[cellDofs[0]], shapeValue);

		shapeValue = refLocation[0] * (1 - refLocation[1]);
		atomicAdd(&solutionVx[cellDofs[1]], shapeValue * velocity[0]);
		atomicAdd(&solutionVy[cellDofs[1]], shapeValue * velocity[1]);
		atomicAdd(&nodeWeights[cellDofs[1]], shapeValue);

		shapeValue = (1 - refLocation[0]) * refLocation[1];
		atomicAdd(&solutionVx[cellDofs[2]], shapeValue * velocity[0]);
		atomicAdd(&solutionVy[cellDofs[2]], shapeValue * velocity[1]);
		atomicAdd(&nodeWeights[cellDofs[2]], shapeValue);

		shapeValue = refLocation[0] * refLocation[1];
		atomicAdd(&solutionVx[cellDofs[3]], shapeValue * velocity[0]);
		atomicAdd(&solutionVy[cellDofs[3]], shapeValue * velocity[1]);
		atomicAdd(&nodeWeights[cellDofs[3]], shapeValue);
	}
}

__global__ void calculate_node_velocities_cuda ( const unsigned int dof_count, double *solutionVx, double *solutionVy, const double *nodeWeights)
{
	int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);

	if(i < dof_count){
		solutionVx[i] /= nodeWeights[i];
		solutionVy[i] /= nodeWeights[i];

		//printf("Velocity for i=%d equals (%f, %f) with weight=%f\n", i, solutionVx[i], solutionVy[i], nodeWeights[i]);
	}
}

void pfem2Solver::distribute_particle_velocities_to_grid() //transfer of particle velocities to the mesh nodes
{
	TimerOutput::Scope timer_section(*timer, "Distribution of particles' velocities to grid nodes");

	hipError_t err = hipSuccess;

	dim3 grid_dim, block_dim;
	constexpr unsigned int particles_per_block = CUDAWrappers::warp_size;
	const double apply_n_blocks = ::ceil(static_cast<double>(particle_handler.n_global_particles()) / static_cast<double>(particles_per_block));
	const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks));
	const unsigned int apply_y_n_blocks = ::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks));

	grid_dim = dim3(apply_x_n_blocks, apply_y_n_blocks);
	block_dim = dim3(particles_per_block);

	pfem2Particle *deviceParticles = NULL;
	err = hipMalloc((void **)&deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle));
	if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceParticles (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	err = hipMemcpy(deviceParticles, particle_handler.particles.data(), particle_handler.n_global_particles() * sizeof(pfem2Particle), hipMemcpyHostToDevice);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector hostParticles from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	size_t meshVectorSize = dof_handlerV.n_dofs() * sizeof(double);

	double *deviceSolutionVx = NULL;
	err = hipMalloc((void **)&deviceSolutionVx, meshVectorSize);
	if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector deviceSolutionVx (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	double *deviceSolutionVy = NULL;
	err = hipMalloc((void **)&deviceSolutionVy, meshVectorSize);
	if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceSolutionVy (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	double *deviceWeights = NULL;
	err = hipMalloc((void **)&deviceWeights, meshVectorSize);
	if (err != hipSuccess){ fprintf(stderr, "Failed to allocate device vector deviceWeights (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	//zero-initialize the accumulators before the atomicAdd reduction (hipMalloc does not zero memory)
	hipMemset(deviceSolutionVx, 0, meshVectorSize);
	hipMemset(deviceSolutionVy, 0, meshVectorSize);
	hipMemset(deviceWeights, 0, meshVectorSize);

	hipLaunchKernelGGL(( distribute_particle_velocities_cuda), dim3(grid_dim), dim3(block_dim), 0, 0, particle_handler.n_global_particles(), deviceParticles, deviceSolutionVx, deviceSolutionVy, deviceWeights);
	hipLaunchKernelGGL(( calculate_node_velocities_cuda), dim3(grid_dim), dim3(block_dim), 0, 0, dof_handlerV.n_dofs(), deviceSolutionVx, deviceSolutionVy, deviceWeights);
	AssertCudaKernel ();

	err = hipGetLastError();
	if (err != hipSuccess){ fprintf(stderr, "Failed to launch distribute_particle_velocities_cuda/calculate_node_velocities_cuda kernels (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	double *hostSolutionVx = (double *)malloc(meshVectorSize);
	if (hostSolutionVx == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); }

	err = hipMemcpy(hostSolutionVx, deviceSolutionVx, meshVectorSize, hipMemcpyDeviceToHost);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector deviceSolutionVx from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	double *hostSolutionVy = (double *)malloc(meshVectorSize);
	if (hostSolutionVy == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); }

	err = hipMemcpy(hostSolutionVy, deviceSolutionVy, meshVectorSize, hipMemcpyDeviceToHost);
	if (err != hipSuccess){ fprintf(stderr, "Failed to copy vector deviceSolutionVy from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }

	for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i){
		locally_relevant_solutionVx[i] = hostSolutionVx[i];
		locally_relevant_solutionVy[i] = hostSolutionVy[i];
	}

	for(std::map<unsigned int, unsigned int>::iterator it =
locally_relevant_solutionVy.compress (VectorOperation::insert); hipFree(deviceWeights); hipFree(deviceParticles); hipFree(deviceSolutionVx); hipFree(deviceSolutionVy); free(hostSolutionVx); free(hostSolutionVy); //std::cout << "Finished distributing particles' velocities to grid" << std::endl; } void pfem2Solver::calculate_loads(types::boundary_id patch_id, std::ofstream *out){ TimerOutput::Scope timer_section(*timer, "Loads calculation"); double Fx_nu(0.0), Fx_p(0.0), Fy_nu(0.0), Fy_p(0.0), point_valueP, dVtdn, Cx_nu, Cx_p, Cy_nu, Cy_p; for(const auto &cell : dof_handlerP.active_cell_iterators()) if(cell->is_locally_owned()) for (unsigned int face_number=0; face_number < GeometryInfo<2>::faces_per_cell; ++face_number) if (cell->face(face_number)->at_boundary() && cell->face(face_number)->boundary_id() == patch_id) { feP_face_values.reinit (cell, face_number); for (unsigned int q_point=0; q_point < n_face_q_points; ++q_point) { point_valueP = 0.0; dVtdn = 0.0; for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex){ point_valueP += locally_relevant_solutionP(cell->vertex_dof_index(vertex,0)) * feP_face_values.shape_value(vertex, q_point); dVtdn += (locally_relevant_solutionVx(cell->vertex_dof_index(vertex,0)) * feP_face_values.normal_vector(q_point)[1] - locally_relevant_solutionVy(cell->vertex_dof_index(vertex,0)) * feP_face_values.normal_vector(q_point)[0]) * (feP_face_values.shape_grad(vertex, q_point)[0] * feP_face_values.normal_vector(q_point)[0] + feP_face_values.shape_grad(vertex, q_point)[1] * feP_face_values.normal_vector(q_point)[1]); }//vertex Fx_nu += mu * dVtdn * feP_face_values.normal_vector(q_point)[1] * feP_face_values.JxW (q_point); Fx_p -= point_valueP * feP_face_values.normal_vector(q_point)[0] * feP_face_values.JxW (q_point); Fy_nu -= mu * dVtdn * feP_face_values.normal_vector(q_point)[0] * feP_face_values.JxW (q_point); Fy_p -= point_valueP * feP_face_values.normal_vector(q_point)[1] * feP_face_values.JxW (q_point); }//q_index }//if Cx_nu = 2.0 * Fx_nu / (rho * uMean * uMean * diam); Cx_p = 2.0 * Fx_p / (rho * uMean * uMean * diam); Cy_nu = 2.0 * Fy_nu / (rho * uMean * uMean * diam); Cy_p = 2.0 * Fy_p / (rho * uMean * uMean * diam); //pressure difference double pressureDifference = 0.0; if(xaDoF != -100) pressureDifference += locally_relevant_solutionP(xaDoF); if(xeDoF != -100) pressureDifference -= locally_relevant_solutionP(xeDoF); const double local_coeffs[5] = { Cx_nu, Cx_p, Cy_nu, Cy_p, pressureDifference }; double global_coeffs[5]; Utilities::MPI::sum(local_coeffs, mpi_communicator, global_coeffs); if (this_mpi_process == 0){ double Cx = global_coeffs[0] + global_coeffs[1]; double Cy = global_coeffs[2] + global_coeffs[3]; *out << time << "," << Cx << "," << Cy << "," << global_coeffs[4] << "," << global_coeffs[0] << "," << global_coeffs[1] << "," << global_coeffs[2] << "," << global_coeffs[3] << std::endl; } //std::cout << "Calculating loads finished" << std::endl; } void pfem2Solver::setCudaConstants() { hipError_t err = hipSuccess; size_t pfem2ParticleSize = sizeof(pfem2Particle); err = hipMemcpyToSymbol(particleSize, &pfem2ParticleSize, sizeof(size_t)); if (err != hipSuccess) std::cout << hipGetErrorString(err) << std::endl; size_t locationOffset = offsetof(pfem2Particle, location); err = hipMemcpyToSymbol(locationPos, &locationOffset, sizeof(size_t)); if (err != hipSuccess) std::cout << hipGetErrorString(err) << std::endl; size_t refLocationOffset = offsetof(pfem2Particle, reference_location); err = hipMemcpyToSymbol(refLocationPos, 
&refLocationOffset, sizeof(size_t)); if (err != hipSuccess) std::cout << hipGetErrorString(err) << std::endl; size_t velocityOffset = offsetof(pfem2Particle, velocity); err = hipMemcpyToSymbol(velocityPos, &velocityOffset, sizeof(size_t)); if (err != hipSuccess) std::cout << hipGetErrorString(err) << std::endl; size_t velocityExtOffset = offsetof(pfem2Particle, velocity_ext); err = hipMemcpyToSymbol(velocityExtPos, &velocityExtOffset, sizeof(size_t)); if (err != hipSuccess) std::cout << hipGetErrorString(err) << std::endl; size_t cellDofsOffset = offsetof(pfem2Particle, cell_dofs); err = hipMemcpyToSymbol(cellDoFsPos, &cellDofsOffset, sizeof(size_t)); if (err != hipSuccess) std::cout << hipGetErrorString(err) << std::endl; }
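// --- Illustrative usage sketch: the time-stepping driver is not part of this file. Based on the
// --- routines above, one PFEM-2 step would plausibly chain the public methods as shown below;
// --- the instance name `solver`, the step count `n_steps`, and the grid-based fluid solve call
// --- `solve_fluid_step()` are hypothetical placeholders, so the sketch is kept in comments.
//
//	pfem2Solver solver;
//	solver.seed_particles({2, 2});	// a 2x2 grid of particles per cell part
//	for (unsigned int step = 0; step < n_steps; ++step){
//		solver.move_particles();	// advect particles through the current nodal velocity field
//		solver.distribute_particle_velocities_to_grid();	// project particle velocities onto mesh nodes
//		// solver.solve_fluid_step();	// hypothetical: viscous/pressure solve on the grid
//		solver.correct_particles_velocities();	// add the nodal velocity increment back to the particles
//	}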
761f8880aeb366a421a4b925913d0e9727244ffe.cu
#include "pfem2particle.h" #include <iostream> #include <fstream> #include <deal.II/fe/fe.h> #include <deal.II/fe/fe_q.h> #include <deal.II/fe/fe_system.h> #include <deal.II/fe/fe_values.h> #include <deal.II/fe/fe_nothing.h> #include <deal.II/fe/fe_poly.h> #include <deal.II/grid/grid_tools.h> #include <deal.II/particles/particle_iterator.h> #include <deal.II/particles/particle_handler.h> #include <deal.II/dofs/dof_handler.h> #include <deal.II/dofs/dof_accessor.h> #include <deal.II/lac/vector.h> #include <deal.II/numerics/vector_tools.h> #include <deal.II/base/quadrature_lib.h> #include <deal.II/base/geometry_info.h> #include <deal.II/base/function.h> #include <deal.II/base/tensor.h> #include <deal.II/grid/tria_accessor.h> #include <deal.II/lac/precondition.h> #include <deal.II/lac/solver_gmres.h> #include "omp.h" using namespace dealii; pfem2Particle::pfem2Particle(const Point<2> & location,const Point<2> & reference_location,const unsigned id) : id (id) { this->location[0] = location[0]; this->location[1] = location[1]; this->reference_location[0] = reference_location[0]; this->reference_location[1] = reference_location[1]; this->velocity[0] = 0.0; this->velocity[1] = 0.0; } pfem2Particle::pfem2Particle() : pfem2Particle(Point<2>(), Point<2>(), 0) { } void pfem2Particle::set_location (const Point<2> &new_location) { location[0] = new_location[0]; location[1] = new_location[1]; } const Point<2> pfem2Particle::get_location () const { return Point<2>(location[0], location[1]); } void pfem2Particle::set_reference_location (const Point<2> &new_reference_location) { reference_location[0] = new_reference_location[0]; reference_location[1] = new_reference_location[1]; } const Point<2> pfem2Particle::get_reference_location () const { return Point<2>(reference_location[0], reference_location[1]); } unsigned int pfem2Particle::get_id () const { return id; } void pfem2Particle::set_cell_dofs(const typename DoFHandler<2>::active_cell_iterator &cell) { for(unsigned int i = 0; i < GeometryInfo<2>::vertices_per_cell; ++i) cell_dofs[i] = cell->vertex_dof_index(i, 0); } void pfem2Particle::set_tria_position(const int &new_position) { tria_position = new_position; } int pfem2Particle::get_tria_position() const { return tria_position; } const Tensor<1,2> pfem2Particle::get_velocity() const { return Tensor<1,2>({velocity[0],velocity[1]}); } const Tensor<1,2> pfem2Particle::get_velocity_ext() const { return Tensor<1,2>({velocity_ext[0],velocity_ext[1]}); } double pfem2Particle::get_velocity_component(int component) const { return velocity[component]; } void pfem2Particle::set_velocity (const Tensor<1,2> &new_velocity) { velocity[0] = new_velocity[0]; velocity[1] = new_velocity[1]; } void pfem2Particle::set_velocity_component (const double value, int component) { velocity[component] = value; } void pfem2Particle::set_velocity_ext (const Tensor<1,2> &new_ext_velocity) { velocity_ext[0] = new_ext_velocity[0]; velocity_ext[1] = new_ext_velocity[1]; } Triangulation<2>::cell_iterator pfem2Particle::get_surrounding_cell(const Triangulation<2> &triangulation) const { const typename Triangulation<2>::cell_iterator cell(&triangulation, triangulation.n_levels() - 1, tria_position); return cell; } DoFHandler<2>::cell_iterator pfem2Particle::get_surrounding_cell(const Triangulation<2> &triangulation, const DoFHandler<2> &dof_handler) const { const typename DoFHandler<2>::cell_iterator cell(&triangulation, triangulation.n_levels() - 1, tria_position, &dof_handler); return cell; } unsigned int 
pfem2Particle::find_closest_vertex_of_cell(const typename Triangulation<2>::active_cell_iterator &cell, const Mapping<2> &mapping)
{
	//transformation of local particle coordinates is required as the global particle coordinates have already been updated by the time this function is called
	const Point<2> old_position = mapping.transform_unit_to_real_cell(cell, get_reference_location());

	Tensor<1,2> velocity_normalized = get_velocity_ext() / get_velocity_ext().norm();
	Tensor<1,2> particle_to_vertex = cell->vertex(0) - old_position;
	particle_to_vertex /= particle_to_vertex.norm();

	double maximum_angle = velocity_normalized * particle_to_vertex;
	unsigned int closest_vertex = 0;

	for (unsigned int v = 1; v < GeometryInfo<2>::vertices_per_cell; ++v){
		particle_to_vertex = cell->vertex(v) - old_position;
		particle_to_vertex /= particle_to_vertex.norm();
		const double v_angle = velocity_normalized * particle_to_vertex;

		if (v_angle > maximum_angle){
			closest_vertex = v;
			maximum_angle = v_angle;
		}
	}

	return closest_vertex;
}

std::size_t pfem2Particle::serialized_size_in_bytes() const
{
	std::size_t size = sizeof(id) + sizeof(location) + sizeof(reference_location) + sizeof(tria_position) + sizeof(velocity) + sizeof(velocity_ext);

	return size;
}

pfem2ParticleHandler::pfem2ParticleHandler(const parallel::distributed::Triangulation<2> &tria, const Mapping<2> &coordMapping)
	: triangulation(&tria, typeid(*this).name())
	, mapping(&coordMapping, typeid(*this).name())
	, particles()
{}

pfem2ParticleHandler::~pfem2ParticleHandler()
{
	clear_particles();
}

void pfem2ParticleHandler::initialize_maps()
{
	vertex_to_cells = std::vector<std::set<typename Triangulation<2>::active_cell_iterator>>(GridTools::vertex_to_cell_map(*triangulation));
	vertex_to_cell_centers = std::vector<std::vector<Tensor<1,2>>>(GridTools::vertex_to_cell_centers_directions(*triangulation,vertex_to_cells));
}

void pfem2ParticleHandler::clear_particles()
{
	particles.clear();
}

std::vector<pfem2Particle>::iterator pfem2ParticleHandler::remove_particle(std::vector<pfem2Particle>::iterator particleIndex)
{
	return particles.erase(particleIndex);
}

void pfem2ParticleHandler::insert_particle(pfem2Particle &particle, const typename DoFHandler<2>::active_cell_iterator &cell)
{
	particle.set_tria_position(cell->index());
	particle.set_cell_dofs(cell);
	particles.push_back(particle);
}

unsigned int pfem2ParticleHandler::n_global_particles() const
{
	return particles.size();
}

bool compare_particle_association(const unsigned int a, const unsigned int b, const Tensor<1,2> &particle_direction, const std::vector<Tensor<1,2> > &center_directions)
{
	const double scalar_product_a = center_directions[a] * particle_direction;
	const double scalar_product_b = center_directions[b] * particle_direction;

	return scalar_product_a > scalar_product_b;
}

void pfem2ParticleHandler::sort_particles_into_subdomains_and_cells(const DoFHandler<2> &dof_handler)
{
	//std::cout << "Sorting particles" << std::endl;
	for(auto it = begin(); it != end(); ){
		const typename Triangulation<2>::cell_iterator cell = (*it).get_surrounding_cell(*triangulation);
		bool found_cell = false;

		try{
			const Point<2> p_unit = mapping->transform_real_to_unit_cell(cell, (*it).get_location());

			if(GeometryInfo<2>::is_inside_unit_cell(p_unit)){
				(*it).set_reference_location(p_unit);
				found_cell = true;
				++it;
			}
		} catch(typename Mapping<2>::ExcTransformationFailed &){
#ifdef VERBOSE_OUTPUT
			std::cout << "Transformation failed for particle with global coordinates " << (*it).get_location() << " (checked cell index
#" << cell->index() << ")" << std::endl; #endif // VERBOSE_OUTPUT } if(!found_cell){ std::vector<unsigned int> neighbor_permutation; Point<2> current_reference_position; typename Triangulation<2>::active_cell_iterator current_cell = (*it).get_surrounding_cell(*triangulation); const unsigned int closest_vertex = (*it).find_closest_vertex_of_cell(current_cell, *mapping); Tensor<1,2> vertex_to_particle = (*it).get_location() - current_cell->vertex(closest_vertex); vertex_to_particle /= vertex_to_particle.norm(); const unsigned int closest_vertex_index = current_cell->vertex_index(closest_vertex); const unsigned int n_neighbor_cells = vertex_to_cells[closest_vertex_index].size(); neighbor_permutation.resize(n_neighbor_cells); for (unsigned int i=0; i<n_neighbor_cells; ++i) neighbor_permutation[i] = i; std::sort(neighbor_permutation.begin(), neighbor_permutation.end(), std::bind(&compare_particle_association, std::placeholders::_1, std::placeholders::_2, std::cref(vertex_to_particle), std::cref(vertex_to_cell_centers[closest_vertex_index]))); for (unsigned int i=0; i<n_neighbor_cells; ++i){ typename std::set<typename Triangulation<2>::active_cell_iterator>::const_iterator cell = vertex_to_cells[closest_vertex_index].begin(); std::advance(cell, neighbor_permutation[i]); try{ const Point<2> p_unit = mapping->transform_real_to_unit_cell(*cell, (*it).get_location()); if (GeometryInfo<2>::is_inside_unit_cell(p_unit)){ current_cell = *cell; (*it).set_reference_location(p_unit); (*it).set_tria_position(current_cell->index()); const typename DoFHandler<2>::cell_iterator dofCell(triangulation, triangulation->n_levels() - 1, current_cell->index(), &dof_handler); (*it).set_cell_dofs(dofCell); found_cell = true; break; } } catch(typename Mapping<2>::ExcTransformationFailed &) { } } if (!found_cell){ *it = std::move(particles.back()); particles.pop_back(); } else ++it; } } } std::vector<pfem2Particle>::iterator pfem2ParticleHandler::begin() { return particles.begin(); } std::vector<pfem2Particle>::iterator pfem2ParticleHandler::end() { return particles.end(); } pfem2Solver::pfem2Solver() : mpi_communicator (MPI_COMM_WORLD), tria(mpi_communicator,Triangulation<2>::maximum_smoothing), particle_handler(tria, mapping), feV (1), feP (1), fe(FE_Q<2>(1), 1), dof_handlerV (tria), dof_handlerP (tria), quadrature_formula(2), face_quadrature_formula(2), feV_values (feV, quadrature_formula, update_values | update_gradients | update_quadrature_points | update_JxW_values), feP_values (feP, quadrature_formula, update_values | update_gradients | update_quadrature_points | update_JxW_values), feV_face_values (feV, face_quadrature_formula, update_values | update_quadrature_points | update_gradients | update_normal_vectors | update_JxW_values), feP_face_values (feP, face_quadrature_formula, update_values | update_quadrature_points | update_gradients | update_normal_vectors | update_JxW_values), dofs_per_cellV (feV.dofs_per_cell), dofs_per_cellP (feP.dofs_per_cell), local_dof_indicesV (dofs_per_cellV), local_dof_indicesP (dofs_per_cellP), n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)), this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)), pcout (std::cout,(this_mpi_process == 0)), n_q_points (quadrature_formula.size()), n_face_q_points (face_quadrature_formula.size()), quantities({0,0}) { setCudaConstants(); } pfem2Solver::~pfem2Solver() { } void pfem2Solver::seed_particles_into_cell (const typename DoFHandler<2>::cell_iterator &cell) { double hx = 1.0/quantities[0]; double hy = 
1.0/quantities[1]; double shapeValue; for(unsigned int i = 0; i < quantities[0]; ++i){ for(unsigned int j = 0; j < quantities[1]; ++j){ pfem2Particle particle(mapping.transform_unit_to_real_cell(cell, Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy)), Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy), ++particleCount); for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex){ shapeValue = fe.shape_value(vertex, particle.get_reference_location()); particle.set_velocity_component(particle.get_velocity_component(0) + shapeValue * locally_relevant_solutionVx(cell->vertex_dof_index(vertex, 0)), 0); particle.set_velocity_component(particle.get_velocity_component(1) + shapeValue * locally_relevant_solutionVy(cell->vertex_dof_index(vertex, 0)), 1); }//vertex particle_handler.insert_particle(particle, cell); } } } bool pfem2Solver::check_cells_for_empty_parts () { bool res = false; std::map<int, std::map<std::vector<unsigned int>, unsigned int>> particlesInCellParts; std::vector<std::vector<pfem2Particle>::iterator> particles_to_be_deleted; //определение, в каких частях ячейки лежат частицы double hx = 1.0/quantities[0]; double hy = 1.0/quantities[1]; unsigned int num_x, num_y; for(auto particleIndex = particle_handler.begin(); particleIndex != particle_handler.end(); ){ num_x = (*particleIndex).get_reference_location()(0)/hx; num_y = (*particleIndex).get_reference_location()(1)/hy; if(particlesInCellParts[(*particleIndex).get_tria_position()][{num_x,num_y}] > MAX_PARTICLES_PER_CELL_PART){ *particleIndex = std::move(particle_handler.particles.back()); particle_handler.particles.pop_back(); res = true; } else { particlesInCellParts[(*particleIndex).get_tria_position()][{num_x,num_y}]++; ++particleIndex; } } double shapeValue; //проверка каждой части ячейки на количество частиц: при 0 - подсевание 1 частицы в центр for(auto cellInfo = particlesInCellParts.begin(); cellInfo != particlesInCellParts.end(); ++cellInfo){ const DoFHandler<2>::cell_iterator cell(&tria, tria.n_levels() - 1, (*cellInfo).first, &dof_handlerV); for(unsigned int i = 0; i < quantities[0]; i++) for(unsigned int j = 0; j < quantities[1]; j++) if((*cellInfo).second[{i,j}] == 0){ pfem2Particle particle(mapping.transform_unit_to_real_cell(cell, Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy)), Point<2>((i + 1.0/2)*hx, (j+1.0/2)*hy), ++particleCount); for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex){ shapeValue = fe.shape_value(vertex, particle.get_reference_location()); particle.set_velocity_component(particle.get_velocity_component(0) + shapeValue * locally_relevant_solutionVx(cell->vertex_dof_index(vertex, 0)), 0); particle.set_velocity_component(particle.get_velocity_component(1) + shapeValue * locally_relevant_solutionVy(cell->vertex_dof_index(vertex, 0)), 1); }//vertex particle_handler.insert_particle(particle, cell); res = true; } } //удаление лишних частиц //for(std::vector<std::vector<pfem2Particle>::iterator>::reverse_iterator it = particles_to_be_deleted.rbegin(); it != particles_to_be_deleted.rend(); ++it) particle_handler.remove_particle(*it); //if(!particles_to_be_deleted.empty()) res = true; return res; } void pfem2Solver::seed_particles(const std::vector < unsigned int > & quantities) { TimerOutput::Scope timer_section(*timer, "Particles' seeding"); if(quantities.size() < 2){ return; } this->quantities = quantities; typename DoFHandler<2>::cell_iterator cell = dof_handlerV.begin(tria.n_levels()-1), endc = dof_handlerV.end(tria.n_levels()-1); for (; cell != endc; ++cell) if 
(cell->is_locally_owned()) seed_particles_into_cell(cell); std::cout << "Created and placed " << particleCount << " particles" << std::endl; std::cout << "Particle handler contains " << particle_handler.n_global_particles() << " particles" << std::endl; } __global__ void correct_particle_velocities_cuda (const unsigned int Numparticles, pfem2Particle *particles, const double *deltaVx, const double *deltaVy) { int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if (i < Numparticles){ double shapeValue; double vel_in_partVx = 0.0; double vel_in_partVy = 0.0; double *velocity = (double*)((char*)&particles[0] + i * particleSize + velocityPos); double *refLocation = (double*)((char*)&particles[0] + i * particleSize + refLocationPos); int *cellDofs = (int*)((char*)&particles[0] + i * particleSize + cellDoFsPos); shapeValue = (1 - refLocation[0]) * (1 - refLocation[1]); vel_in_partVx += shapeValue * deltaVx[cellDofs[0]]; vel_in_partVy += shapeValue * deltaVy[cellDofs[0]]; shapeValue = refLocation[0] * (1 - refLocation[1]); vel_in_partVx += shapeValue * deltaVx[cellDofs[1]]; vel_in_partVy += shapeValue * deltaVy[cellDofs[1]]; shapeValue = (1 - refLocation[0]) * refLocation[1]; vel_in_partVx += shapeValue * deltaVx[cellDofs[2]]; vel_in_partVy += shapeValue * deltaVy[cellDofs[2]]; shapeValue = refLocation[0] * refLocation[1]; vel_in_partVx += shapeValue * deltaVx[cellDofs[3]]; vel_in_partVy += shapeValue * deltaVy[cellDofs[3]]; velocity[0] += vel_in_partVx; velocity[1] += vel_in_partVy; *(((char*)&particles[0] + i * particleSize + velocityPos)) = *velocity; } } void pfem2Solver::correct_particles_velocities() { TimerOutput::Scope timer_section(*timer, "Particles' velocities correction"); cudaError_t err = cudaSuccess; //Mesh node velocity difference vectors size_t meshVectorSize = dof_handlerV.n_dofs() * sizeof(double); //Vx double *hostDeltaVx = (double *)malloc(meshVectorSize); if (hostDeltaVx == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostDeltaVx[i] = locally_relevant_solutionVx[i] - locally_relevant_old_solutionVx[i]; double *deviceDeltaVx = NULL; err = cudaMalloc((void **)&deviceDeltaVx, meshVectorSize); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector deviceDeltaVx (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceDeltaVx, hostDeltaVx, meshVectorSize, cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector hostDeltaVx from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //Vy double *hostDeltaVy = (double *)malloc(meshVectorSize); if (hostDeltaVy == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostDeltaVy[i] = locally_relevant_solutionVy[i] - locally_relevant_old_solutionVy[i]; double *deviceDeltaVy = NULL; err = cudaMalloc((void **)&deviceDeltaVy, meshVectorSize); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceDeltaVy (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceDeltaVy, hostDeltaVy, meshVectorSize, cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector hostDeltaVy from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } dim3 grid_dim, block_dim; constexpr unsigned int particles_per_block = 
CUDAWrappers::warp_size; const double apply_n_blocks = std::ceil(static_cast<double>(particle_handler.n_global_particles()) / static_cast<double>(particles_per_block)); const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks)); const unsigned int apply_y_n_blocks = std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks)); grid_dim = dim3(apply_x_n_blocks, apply_y_n_blocks); block_dim = dim3(particles_per_block); pfem2Particle *deviceParticles = NULL; err = cudaMalloc((void **)&deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle)); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceParticles (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceParticles, particle_handler.particles.data(), particle_handler.n_global_particles() * sizeof(pfem2Particle), cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector hostParticles from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } correct_particle_velocities_cuda<<<grid_dim, block_dim>>>(particle_handler.n_global_particles(), deviceParticles, deviceDeltaVx, deviceDeltaVy); AssertCudaKernel (); err = cudaGetLastError(); if (err != cudaSuccess){ fprintf(stderr, "Failed to launch move_particles_cuda kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } timer->enter_subsection("Memory copy"); err = cudaMemcpy(particle_handler.particles.data(), deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle), cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector deviceParticles from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } timer->leave_subsection(); cudaFree(deviceParticles); cudaFree(deviceDeltaVx); cudaFree(deviceDeltaVy); free(hostDeltaVx); free(hostDeltaVy); //std::cout << "Finished correcting particles' velocities" << std::endl; } __global__ void move_particles_cuda ( const unsigned int Numparticles, const double time_step, pfem2Particle *particles, const double *solutionVx, const double *solutionVy) { int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if (i < Numparticles){ double shapeValue; double vel_in_partVx = 0.0; double vel_in_partVy = 0.0; double *location = (double*)((char*)&particles[0] + i * particleSize + locationPos); double *refLocation = (double*)((char*)&particles[0] + i * particleSize + refLocationPos); int *cellDofs = (int*)((char*)&particles[0] + i * particleSize + cellDoFsPos); //printf("Particle %d has coordinates (%f, %f)\n", i, location[0], location[1]); //printf("Particle %d has local coordinates (%f, %f)\n", i, refLocation[0], refLocation[1]); //printf("Particle %d has cell with dofs (%d, %d, %d, %d)\n", i, cellDofs[0], cellDofs[1], cellDofs[2], cellDofs[3]); shapeValue = (1 - refLocation[0]) * (1 - refLocation[1]); vel_in_partVx += shapeValue * solutionVx[cellDofs[0]]; vel_in_partVy += shapeValue * solutionVy[cellDofs[0]]; shapeValue = refLocation[0] * (1 - refLocation[1]); vel_in_partVx += shapeValue * solutionVx[cellDofs[1]]; vel_in_partVy += shapeValue * solutionVy[cellDofs[1]]; shapeValue = (1 - refLocation[0]) * refLocation[1]; vel_in_partVx += shapeValue * solutionVx[cellDofs[2]]; vel_in_partVy += shapeValue * solutionVy[cellDofs[2]]; shapeValue = refLocation[0] * refLocation[1]; vel_in_partVx += shapeValue * solutionVx[cellDofs[3]]; vel_in_partVy += shapeValue * solutionVy[cellDofs[3]]; 
location[0] += time_step * vel_in_partVx; location[1] += time_step * vel_in_partVy; *((char*)&particles[0] + i * particleSize + locationPos) = *location; *((double*)((char*)&particles[0] + i * particleSize + velocityExtPos)) = vel_in_partVx; *((double*)((char*)&particles[0] + i * particleSize + velocityExtPos + sizeof(double))) = vel_in_partVy; } } void pfem2Solver::move_particles() //перенос частиц { TimerOutput::Scope timer_section(*timer, "Particles' movement"); double min_time_step = time_step / PARTICLES_MOVEMENT_STEPS; cudaError_t err = cudaSuccess; //Mesh node velocity vectors size_t meshVectorSize = dof_handlerV.n_dofs() * sizeof(double); //Vx double *hostSolutionVx = (double *)malloc(meshVectorSize); if (hostSolutionVx == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostSolutionVx[i] = locally_relevant_solutionVx[i]; double *deviceSolutionVx = NULL; err = cudaMalloc((void **)&deviceSolutionVx, meshVectorSize); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector deviceSolutionVx (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceSolutionVx, hostSolutionVx, meshVectorSize, cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector hostSolutionVx from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //Vy double *hostSolutionVy = (double *)malloc(meshVectorSize); if (hostSolutionVy == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i) hostSolutionVy[i] = locally_relevant_solutionVy[i]; double *deviceSolutionVy = NULL; err = cudaMalloc((void **)&deviceSolutionVy, meshVectorSize); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceSolutionVy (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceSolutionVy, hostSolutionVy, meshVectorSize, cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector hostSolutionVy from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } for (int np_m = 0; np_m < PARTICLES_MOVEMENT_STEPS; ++np_m) { timer->enter_subsection("Variables preparation"); dim3 grid_dim, block_dim; constexpr unsigned int particles_per_block = CUDAWrappers::warp_size; const double apply_n_blocks = std::ceil(static_cast<double>(particle_handler.n_global_particles()) / static_cast<double>(particles_per_block)); const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks)); const unsigned int apply_y_n_blocks = std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks)); grid_dim = dim3(apply_x_n_blocks, apply_y_n_blocks); block_dim = dim3(particles_per_block); pfem2Particle *deviceParticles = NULL; err = cudaMalloc((void **)&deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle)); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceParticles (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceParticles, particle_handler.particles.data(), particle_handler.n_global_particles() * sizeof(pfem2Particle), cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector hostParticles from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } timer->leave_subsection(); 
timer->enter_subsection("CUDA step"); move_particles_cuda<<<grid_dim, block_dim>>>(particle_handler.n_global_particles(), min_time_step, deviceParticles, deviceSolutionVx, deviceSolutionVy); AssertCudaKernel (); err = cudaGetLastError(); if (err != cudaSuccess){ fprintf(stderr, "Failed to launch move_particles_cuda kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } timer->leave_subsection(); timer->enter_subsection("Memory copy"); err = cudaMemcpy(particle_handler.particles.data(), deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle), cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector deviceParticles from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } timer->leave_subsection(); timer->enter_subsection("Particles' sorting"); particle_handler.sort_particles_into_subdomains_and_cells(dof_handlerV); timer->leave_subsection(); timer->enter_subsection("Memory release"); cudaFree(deviceParticles); timer->leave_subsection(); }//np_m //проверка наличия пустых ячеек (без частиц) и размещение в них частиц timer->enter_subsection("Checking cells for empty parts"); check_cells_for_empty_parts(); timer->leave_subsection(); //std::cout << "Finished moving particles" << std::endl; cudaFree(deviceSolutionVx); cudaFree(deviceSolutionVy); free(hostSolutionVx); free(hostSolutionVy); } __global__ void distribute_particle_velocities_cuda ( const unsigned int Numparticles, const pfem2Particle *particles, double *solutionVx, double *solutionVy, double *nodeWeights) { int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if (i < Numparticles){ double shapeValue; double *velocity = (double*)((char*)&particles[0] + i * particleSize + velocityPos); double *refLocation = (double*)((char*)&particles[0] + i * particleSize + refLocationPos); int *cellDofs = (int*)((char*)&particles[0] + i * particleSize + cellDoFsPos); shapeValue = (1 - refLocation[0]) * (1 - refLocation[1]); atomicAdd(&solutionVx[cellDofs[0]], shapeValue * velocity[0]); atomicAdd(&solutionVy[cellDofs[0]], shapeValue * velocity[1]); atomicAdd(&nodeWeights[cellDofs[0]], shapeValue); shapeValue = refLocation[0] * (1 - refLocation[1]); atomicAdd(&solutionVx[cellDofs[1]], shapeValue * velocity[0]); atomicAdd(&solutionVy[cellDofs[1]], shapeValue * velocity[1]); atomicAdd(&nodeWeights[cellDofs[1]], shapeValue); shapeValue = (1 - refLocation[0]) * refLocation[1]; atomicAdd(&solutionVx[cellDofs[2]], shapeValue * velocity[0]); atomicAdd(&solutionVy[cellDofs[2]], shapeValue * velocity[1]); atomicAdd(&nodeWeights[cellDofs[2]], shapeValue); shapeValue = refLocation[0] * refLocation[1]; atomicAdd(&solutionVx[cellDofs[3]], shapeValue * velocity[0]); atomicAdd(&solutionVy[cellDofs[3]], shapeValue * velocity[1]); atomicAdd(&nodeWeights[cellDofs[3]], shapeValue); } } __global__ void calculate_node_velocities_cuda ( const unsigned int dof_count, double *solutionVx, double *solutionVy, const double *nodeWeights) { int i = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if(i < dof_count){ solutionVx[i] /= nodeWeights[i]; solutionVy[i] /= nodeWeights[i]; //printf("Velocity for i=%d equals (%f, %f) with weight=%f\n", i, solutionVx[i], solutionVy[i], nodeWeights[i]); } } void pfem2Solver::distribute_particle_velocities_to_grid() //перенос скоростей частиц на узлы сетки { TimerOutput::Scope timer_section(*timer, "Distribution of particles' velocities to grid nodes"); cudaError_t err = cudaSuccess; dim3 grid_dim, 
block_dim; constexpr unsigned int particles_per_block = CUDAWrappers::warp_size; const double apply_n_blocks = std::ceil(static_cast<double>(particle_handler.n_global_particles()) / static_cast<double>(particles_per_block)); const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks)); const unsigned int apply_y_n_blocks = std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks)); grid_dim = dim3(apply_x_n_blocks, apply_y_n_blocks); block_dim = dim3(particles_per_block); pfem2Particle *deviceParticles = NULL; err = cudaMalloc((void **)&deviceParticles, particle_handler.n_global_particles() * sizeof(pfem2Particle)); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceParticles (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceParticles, particle_handler.particles.data(), particle_handler.n_global_particles() * sizeof(pfem2Particle), cudaMemcpyHostToDevice); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy particles data from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } size_t meshVectorSize = dof_handlerV.n_dofs() * sizeof(double); double *deviceSolutionVx = NULL; err = cudaMalloc((void **)&deviceSolutionVx, meshVectorSize); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector deviceSolutionVx (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } double *deviceSolutionVy = NULL; err = cudaMalloc((void **)&deviceSolutionVy, meshVectorSize); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceSolutionVy (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } double *deviceWeights = NULL; err = cudaMalloc((void **)&deviceWeights, meshVectorSize); if (err != cudaSuccess){ fprintf(stderr, "Failed to allocate device vector deviceWeights (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // zero the accumulators before the atomicAdd-based scatter cudaMemset(deviceSolutionVx, 0, meshVectorSize); cudaMemset(deviceSolutionVy, 0, meshVectorSize); cudaMemset(deviceWeights, 0, meshVectorSize); distribute_particle_velocities_cuda<<<grid_dim, block_dim>>>(particle_handler.n_global_particles(), deviceParticles, deviceSolutionVx, deviceSolutionVy, deviceWeights); // the particle-sized grid is reused here; this assumes n_dofs() <= n_global_particles() calculate_node_velocities_cuda<<<grid_dim, block_dim>>>(dof_handlerV.n_dofs(), deviceSolutionVx, deviceSolutionVy, deviceWeights); AssertCudaKernel(); err = cudaGetLastError(); if (err != cudaSuccess){ fprintf(stderr, "Failed to launch velocity distribution kernels (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } double *hostSolutionVx = (double *)malloc(meshVectorSize); if (hostSolutionVx == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } err = cudaMemcpy(hostSolutionVx, deviceSolutionVx, meshVectorSize, cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector deviceSolutionVx from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } double *hostSolutionVy = (double *)malloc(meshVectorSize); if (hostSolutionVy == NULL){ fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } err = cudaMemcpy(hostSolutionVy, deviceSolutionVy, meshVectorSize, cudaMemcpyDeviceToHost); if (err != cudaSuccess){ fprintf(stderr, "Failed to copy vector deviceSolutionVy from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } for(unsigned int i = 0; i < dof_handlerV.n_dofs(); ++i){ locally_relevant_solutionVx[i] = hostSolutionVx[i]; locally_relevant_solutionVy[i] = hostSolutionVy[i]; } for(std::map<unsigned int, unsigned int>::iterator it =
wallsAndBodyDoFs.begin(); it != wallsAndBodyDoFs.end(); ++it){ locally_relevant_solutionVx(it->first) = 0.0; locally_relevant_solutionVy(it->first) = 0.0; } locally_relevant_solutionVx.compress (VectorOperation::insert); locally_relevant_solutionVy.compress (VectorOperation::insert); cudaFree(deviceWeights); cudaFree(deviceParticles); cudaFree(deviceSolutionVx); cudaFree(deviceSolutionVy); free(hostSolutionVx); free(hostSolutionVy); //std::cout << "Finished distributing particles' velocities to grid" << std::endl; } void pfem2Solver::calculate_loads(types::boundary_id patch_id, std::ofstream *out){ TimerOutput::Scope timer_section(*timer, "Loads calculation"); double Fx_nu(0.0), Fx_p(0.0), Fy_nu(0.0), Fy_p(0.0), point_valueP, dVtdn, Cx_nu, Cx_p, Cy_nu, Cy_p; for(const auto &cell : dof_handlerP.active_cell_iterators()) if(cell->is_locally_owned()) for (unsigned int face_number=0; face_number < GeometryInfo<2>::faces_per_cell; ++face_number) if (cell->face(face_number)->at_boundary() && cell->face(face_number)->boundary_id() == patch_id) { feP_face_values.reinit (cell, face_number); for (unsigned int q_point=0; q_point < n_face_q_points; ++q_point) { point_valueP = 0.0; dVtdn = 0.0; for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex){ point_valueP += locally_relevant_solutionP(cell->vertex_dof_index(vertex,0)) * feP_face_values.shape_value(vertex, q_point); dVtdn += (locally_relevant_solutionVx(cell->vertex_dof_index(vertex,0)) * feP_face_values.normal_vector(q_point)[1] - locally_relevant_solutionVy(cell->vertex_dof_index(vertex,0)) * feP_face_values.normal_vector(q_point)[0]) * (feP_face_values.shape_grad(vertex, q_point)[0] * feP_face_values.normal_vector(q_point)[0] + feP_face_values.shape_grad(vertex, q_point)[1] * feP_face_values.normal_vector(q_point)[1]); }//vertex Fx_nu += mu * dVtdn * feP_face_values.normal_vector(q_point)[1] * feP_face_values.JxW (q_point); Fx_p -= point_valueP * feP_face_values.normal_vector(q_point)[0] * feP_face_values.JxW (q_point); Fy_nu -= mu * dVtdn * feP_face_values.normal_vector(q_point)[0] * feP_face_values.JxW (q_point); Fy_p -= point_valueP * feP_face_values.normal_vector(q_point)[1] * feP_face_values.JxW (q_point); }//q_point }//if Cx_nu = 2.0 * Fx_nu / (rho * uMean * uMean * diam); Cx_p = 2.0 * Fx_p / (rho * uMean * uMean * diam); Cy_nu = 2.0 * Fy_nu / (rho * uMean * uMean * diam); Cy_p = 2.0 * Fy_p / (rho * uMean * uMean * diam); //pressure difference double pressureDifference = 0.0; if(xaDoF != -100) pressureDifference += locally_relevant_solutionP(xaDoF); if(xeDoF != -100) pressureDifference -= locally_relevant_solutionP(xeDoF); const double local_coeffs[5] = { Cx_nu, Cx_p, Cy_nu, Cy_p, pressureDifference }; double global_coeffs[5]; Utilities::MPI::sum(local_coeffs, mpi_communicator, global_coeffs); if (this_mpi_process == 0){ double Cx = global_coeffs[0] + global_coeffs[1]; double Cy = global_coeffs[2] + global_coeffs[3]; *out << time << "," << Cx << "," << Cy << "," << global_coeffs[4] << "," << global_coeffs[0] << "," << global_coeffs[1] << "," << global_coeffs[2] << "," << global_coeffs[3] << std::endl; } //std::cout << "Calculating loads finished" << std::endl; } void pfem2Solver::setCudaConstants() { cudaError_t err = cudaSuccess; size_t pfem2ParticleSize = sizeof(pfem2Particle); err = cudaMemcpyToSymbol(particleSize, &pfem2ParticleSize, sizeof(size_t)); if (err != cudaSuccess) std::cout << cudaGetErrorString(err) << std::endl; size_t locationOffset = offsetof(pfem2Particle, location); err =
cudaMemcpyToSymbol(locationPos, &locationOffset, sizeof(size_t)); if (err != cudaSuccess) std::cout << cudaGetErrorString(err) << std::endl; size_t refLocationOffset = offsetof(pfem2Particle, reference_location); err = cudaMemcpyToSymbol(refLocationPos, &refLocationOffset, sizeof(size_t)); if (err != cudaSuccess) std::cout << cudaGetErrorString(err) << std::endl; size_t velocityOffset = offsetof(pfem2Particle, velocity); err = cudaMemcpyToSymbol(velocityPos, &velocityOffset, sizeof(size_t)); if (err != cudaSuccess) std::cout << cudaGetErrorString(err) << std::endl; size_t velocityExtOffset = offsetof(pfem2Particle, velocity_ext); err = cudaMemcpyToSymbol(velocityExtPos, &velocityExtOffset, sizeof(size_t)); if (err != cudaSuccess) std::cout << cudaGetErrorString(err) << std::endl; size_t cellDofsOffset = offsetof(pfem2Particle, cell_dofs); err = cudaMemcpyToSymbol(cellDoFsPos, &cellDofsOffset, sizeof(size_t)); if (err != cudaSuccess) std::cout << cudaGetErrorString(err) << std::endl; }
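// A minimal, self-contained sketch of the pattern used by setCudaConstants()
// above: struct-field offsets are published once to __constant__ symbols via
// cudaMemcpyToSymbol, and kernels recover typed fields by pointer arithmetic
// on a raw byte base. Everything below (demoParticle, demo_read_velocity) is
// hypothetical scaffolding for illustration, not part of the solver.
#include <cstddef>
#include <cstdio>
#include <cuda_runtime.h>

struct demoParticle { double location[2]; double velocity[2]; };

__constant__ size_t demoParticleSize;
__constant__ size_t demoVelocityPos;

__global__ void demo_read_velocity(const demoParticle *particles, int n, double *out)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= n) return;
    // Same idiom as move_particles_cuda: byte base + i * particleSize + field offset.
    const double *vel = (const double *)((const char *)&particles[0]
                                         + i * demoParticleSize + demoVelocityPos);
    out[i] = vel[0];  // x-component of the i-th particle's velocity
}

int main()
{
    size_t sz = sizeof(demoParticle), off = offsetof(demoParticle, velocity);
    cudaMemcpyToSymbol(demoParticleSize, &sz, sizeof(size_t));
    cudaMemcpyToSymbol(demoVelocityPos, &off, sizeof(size_t));

    demoParticle h[2] = {{{0.0, 0.0}, {1.5, 2.5}}, {{0.0, 0.0}, {3.5, 4.5}}};
    demoParticle *d_p = NULL; double *d_out = NULL, h_out[2];
    cudaMalloc(&d_p, sizeof(h));
    cudaMalloc(&d_out, 2 * sizeof(double));
    cudaMemcpy(d_p, h, sizeof(h), cudaMemcpyHostToDevice);
    demo_read_velocity<<<1, 2>>>(d_p, 2, d_out);
    cudaMemcpy(h_out, d_out, 2 * sizeof(double), cudaMemcpyDeviceToHost);
    printf("vx[0] = %g, vx[1] = %g\n", h_out[0], h_out[1]);  // expect 1.5 and 3.5
    cudaFree(d_p); cudaFree(d_out);
    return 0;
}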
2cabc5ecc190e27d88d13cb567912bcb7d978ccb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../image_headers/threshold.cuh" #include <iostream> #include <cmath> #include <cstdio> __global__ void threshold_kernel(unsigned char* image, unsigned char* output, size_t width, size_t height, float low_threshold, float high_threshold, int strong, int weak) { int output_index = blockIdx.x * blockDim.x + threadIdx.x; if ((size_t)output_index >= width * height) return; // guard against the excess threads in the last block int x = output_index % width; int y = output_index / width; if ((x > 0) && (x < width)) { if ((y > 0) && (y < height)) { if (image[output_index] > high_threshold) { output[output_index] = strong; } else if (image[output_index] > low_threshold) { output[output_index] = weak; } else { output[output_index] = 0; } } else { output[output_index] = 0; } } else { output[output_index] = 0; } }
2cabc5ecc190e27d88d13cb567912bcb7d978ccb.cu
#include "../image_headers/threshold.cuh" #include <iostream> #include <cmath> #include <cstdio> __global__ void threshold_kernel(unsigned char* image, unsigned char* output, size_t width, size_t height, float low_threshold, float high_threshold, int strong, int weak) { int output_index = blockIdx.x * blockDim.x + threadIdx.x; if ((size_t)output_index >= width * height) return; // guard against the excess threads in the last block int x = output_index % width; int y = output_index / width; if ((x > 0) && (x < width)) { if ((y > 0) && (y < height)) { if (image[output_index] > high_threshold) { output[output_index] = strong; } else if (image[output_index] > low_threshold) { output[output_index] = weak; } else { output[output_index] = 0; } } else { output[output_index] = 0; } } else { output[output_index] = 0; } }
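// A hedged host-side sketch of how threshold_kernel is presumably driven for
// Canny-style double thresholding. The original repo's launcher is not shown
// here, so the block size (256), the strong/weak values (255/25), the
// thresholds, and the tiny 4x2 test image are illustrative assumptions; the
// include path is taken from the file above and is assumed to declare the kernel.
#include <cstdio>
#include <cuda_runtime.h>
#include "../image_headers/threshold.cuh"

int main()
{
    const size_t width = 4, height = 2, n = width * height;
    unsigned char h_in[n] = {10, 40, 90, 200, 5, 60, 120, 250};
    unsigned char h_out[n];
    unsigned char *d_in = NULL, *d_out = NULL;
    cudaMalloc(&d_in, n);
    cudaMalloc(&d_out, n);
    cudaMemcpy(d_in, h_in, n, cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (int)((n + block - 1) / block);  // one thread per pixel
    // Pixels above high_threshold become strong edges, pixels between the two
    // thresholds become weak candidates, everything else is suppressed.
    threshold_kernel<<<grid, block>>>(d_in, d_out, width, height, 50.0f, 100.0f, 255, 25);

    cudaMemcpy(h_out, d_out, n, cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < n; ++i) printf("%u ", h_out[i]);
    printf("\n");
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}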
aec54a33b3c99f81b176ad11368ed1e812de1610.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * neighborlist.cu * * Created on: Sep 4, 2016 * Author: uwe */ #include "neighborlist_modes.h" #include "nativeTypesWrapper.h" #include "DeviceNLGrid.h" #include "DeviceProtein.h" #include "DeviceParamTable.h" #include "SimParam.h" #include "forcefield.h" #include "macros.h" #include "Types_6D_Modes.h" namespace as { /* * In comparison to the NL force calculation without modes, this method takes the deformed coordinates of the receptor instead of its original position, * which does not contain mode deformation. * Note that if the forces that are acting on the receptor are calculated, they have to be rotated back into the system of the receptor. */ template<typename REAL, typename DOF_T> __global__ void d_NLPotForce( const d_NLGrid<REAL> grid, const d_Protein<REAL> rec, const d_Protein<REAL> lig, const d_ParamTable<REAL> table, const SimParam<REAL> simParam, const unsigned numDOFs, const REAL* RecPosX, const REAL* RecPosY, const REAL* RecPosZ, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { using real3_t = typename TypeWrapper<REAL>::real3_t; const unsigned i = blockDim.x * blockIdx.x + threadIdx.x; const unsigned LigNumEl = lig.numAtoms; if (i < LigNumEl*numDOFs) { const unsigned LigAttrIdx = i % LigNumEl; unsigned recBase = 0; const unsigned atomTypeLig = lig.type[LigAttrIdx]; if (atomTypeLig != 0) { const REAL posLigX = LigPosX[i]; const REAL posLigY = LigPosY[i]; const REAL posLigZ = LigPosZ[i]; /* test if particle is out of bounds and perform data fetch and neighbourlist calculations */ if (!( (posLigX < grid.minDim.x || posLigX > grid.maxDim.x) || (posLigY < grid.minDim.y || posLigY > grid.maxDim.y) || (posLigZ < grid.minDim.z || posLigZ > grid.maxDim.z) )) { const uint2 nDesc = tex3D<uint2>(grid.tex, (posLigX - grid.minDim.x) * grid.dVox_inv + 0.5, (posLigY - grid.minDim.y) * grid.dVox_inv + 0.5, (posLigZ - grid.minDim.z) * grid.dVox_inv + 0.5); /* numEl = x; idx = y */ real3_t fAcc = {0,0,0}; REAL eAcc = 0; for (unsigned j = 0; j < nDesc.x; ++j) { const unsigned nIdx = grid.neighborList[nDesc.y + j]; REAL xRec, yRec, zRec; if( std::is_same<DOF_T, DOF_6D_Modes<REAL>>::value ){ recBase = rec.numAtoms * (int) (i / LigNumEl); xRec = RecPosX[nIdx + recBase]; yRec = RecPosY[nIdx + recBase]; zRec = RecPosZ[nIdx + recBase]; } else{ xRec = rec.xPos[nIdx]; yRec = rec.yPos[nIdx]; zRec = rec.zPos[nIdx]; } REAL dx = posLigX - xRec; REAL dy = posLigY - yRec; REAL dz = posLigZ - zRec; const REAL dr2 = dx * dx + dy * dy + dz * dz; const REAL dPlateau2 = grid.dPlateau2; if ((dr2) > dPlateau2) { continue; } constexpr REAL one = static_cast<REAL>(1.0); const REAL dr2_inv = one/dr2; // inverse of dr2 // Scale distances dx *= dr2_inv; dy *= dr2_inv; dz *= dr2_inv; real3_t fVdW; REAL eVdW; const size_t atomTypeRec = rec.type[nIdx]; // calculate energy and potential/energy of LJ/VdW potential auto const params = table.getParams(atomTypeRec-1, atomTypeLig-1); LJPotForce(dr2, dr2_inv, dx, dy, dz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x += fVdW.x; fAcc.y += fVdW.y; fAcc.z += fVdW.z; eAcc += eVdW; const REAL chargeLig = lig.charge[LigAttrIdx]; const REAL chargeRec = rec.charge[nIdx]; const REAL chargeLigRec = chargeLig * chargeRec * simParam.ffelec; const bool calc_elec = abs(chargeLigRec) > 0.001; // evaluate electric potential REAL dPlateau2_inv = 1/grid.dPlateau2; const REAL ratio = sqrt(dr2*dPlateau2_inv);
REAL rdx = ratio*dx; REAL rdy = ratio*dy; REAL rdz = ratio*dz; LJPotForce(dPlateau2, dPlateau2_inv, rdx, rdy, rdz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x -= fVdW.x; fAcc.y -= fVdW.y; fAcc.z -= fVdW.z; eAcc -= eVdW; if (calc_elec) { REAL eEl; real3_t fEl; // calculate energy and potential/energy of charge potential ChargePotForce(dr2_inv, dx, dy, dz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x += fEl.x; fAcc.y += fEl.y; fAcc.z += fEl.z; eAcc += eEl; ChargePotForce(dPlateau2_inv, rdx, rdy, rdz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x -= fEl.x; fAcc.y -= fEl.y; fAcc.z -= fEl.z; eAcc -= eEl; } } /* store results back to global memory */ if (nDesc.x > 0) { outLig_fx[i] += fAcc.x; outLig_fy[i] += fAcc.y; outLig_fz[i] += fAcc.z; outLigand_E[i] += eAcc; } } } // if (atomtype != 0) } } template<typename REAL, typename DOF_T> void d_NLPotForce( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<REAL>& grid, const d_Protein<REAL>& rec, const d_Protein<REAL>& lig, const d_ParamTable<REAL>& table, const SimParam<REAL>& simParam, const unsigned& numDOFs, const REAL* RecPosX, const REAL* RecPosY, const REAL* RecPosZ, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { cudaVerifyKernel(( hipLaunchKernelGGL(( d_NLPotForce<REAL,DOF_T>), dim3(gridSize), dim3(blockSize), 0, stream, grid, rec, lig, table, simParam, numDOFs, RecPosX, RecPosY, RecPosZ, LigPosX, LigPosY, LigPosZ, outLig_fx, outLig_fy, outLig_fz, outLigand_E ) )); } template void d_NLPotForce<float, DOF_6D_Modes<float>>( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<float>& grid, const d_Protein<float>& rec, const d_Protein<float>& lig, const d_ParamTable<float>& table, const SimParam<float>& simParam, const unsigned& numDOFs, const float* RecPosX, const float* RecPosY, const float* RecPosZ, const float* LigPosX, const float* LigPosY, const float* LigPosZ, float* outLig_fx, float* outLig_fy, float* outLig_fz, float* outLigand_E ); template void d_NLPotForce<double, DOF_6D_Modes<double>>( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<double>& grid, const d_Protein<double>& rec, const d_Protein<double>& lig, const d_ParamTable<double>& table, const SimParam<double>& simParam, const unsigned& numDOFs, const double* RecPosX, const double* RecPosY, const double* RecPosZ, const double* LigPosX, const double* LigPosY, const double* LigPosZ, double* outLig_fx, double* outLig_fy, double* outLig_fz, double* outLigand_E ); template void d_NLPotForce<float, DOF_6D<float>>( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<float>& grid, const d_Protein<float>& rec, const d_Protein<float>& lig, const d_ParamTable<float>& table, const SimParam<float>& simParam, const unsigned& numDOFs, const float* RecPosX, const float* RecPosY, const float* RecPosZ, const float* LigPosX, const float* LigPosY, const float* LigPosZ, float* outLig_fx, float* outLig_fy, float* outLig_fz, float* outLigand_E ); template void d_NLPotForce<double, DOF_6D<double>>( unsigned blockSize, unsigned gridSize, const hipStream_t &stream, const d_NLGrid<double>& grid, const d_Protein<double>& rec, const d_Protein<double>& lig, const d_ParamTable<double>& table, const SimParam<double>& simParam, const unsigned& numDOFs, const double* RecPosX, const double* RecPosY, const double* RecPosZ, const double* 
LigPosX, const double* LigPosY, const double* LigPosZ, double* outLig_fx, double* outLig_fy, double* outLig_fz, double* outLigand_E ); } // namespace as
aec54a33b3c99f81b176ad11368ed1e812de1610.cu
/* * neighborlist.cu * * Created on: Sep 4, 2016 * Author: uwe */ #include "neighborlist_modes.h" #include "nativeTypesWrapper.h" #include "DeviceNLGrid.h" #include "DeviceProtein.h" #include "DeviceParamTable.h" #include "SimParam.h" #include "forcefield.h" #include "macros.h" #include "Types_6D_Modes.h" namespace as { /* * In comparison to the NL force calculation without modes, this method takes the deformed coordinates of the receptor instead of its original position, * which does not contain mode deformation. * Note that if the forces that are acting on the receptor are calculated, they have to be rotated back into the system of the receptor. */ template<typename REAL, typename DOF_T> __global__ void d_NLPotForce( const d_NLGrid<REAL> grid, const d_Protein<REAL> rec, const d_Protein<REAL> lig, const d_ParamTable<REAL> table, const SimParam<REAL> simParam, const unsigned numDOFs, const REAL* RecPosX, const REAL* RecPosY, const REAL* RecPosZ, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { using real3_t = typename TypeWrapper<REAL>::real3_t; const unsigned i = blockDim.x * blockIdx.x + threadIdx.x; const unsigned LigNumEl = lig.numAtoms; if (i < LigNumEl*numDOFs) { const unsigned LigAttrIdx = i % LigNumEl; unsigned recBase = 0; const unsigned atomTypeLig = lig.type[LigAttrIdx]; if (atomTypeLig != 0) { const REAL posLigX = LigPosX[i]; const REAL posLigY = LigPosY[i]; const REAL posLigZ = LigPosZ[i]; /* test if particle is out of bounds and perform data fetch and neighbourlist calculations */ if (!( (posLigX < grid.minDim.x || posLigX > grid.maxDim.x) || (posLigY < grid.minDim.y || posLigY > grid.maxDim.y) || (posLigZ < grid.minDim.z || posLigZ > grid.maxDim.z) )) { const uint2 nDesc = tex3D<uint2>(grid.tex, (posLigX - grid.minDim.x) * grid.dVox_inv + 0.5, (posLigY - grid.minDim.y) * grid.dVox_inv + 0.5, (posLigZ - grid.minDim.z) * grid.dVox_inv + 0.5); /* numEl = x; idx = y */ real3_t fAcc = {0,0,0}; REAL eAcc = 0; for (unsigned j = 0; j < nDesc.x; ++j) { const unsigned nIdx = grid.neighborList[nDesc.y + j]; REAL xRec, yRec, zRec; if( std::is_same<DOF_T, DOF_6D_Modes<REAL>>::value ){ recBase = rec.numAtoms * (int) (i / LigNumEl); xRec = RecPosX[nIdx + recBase]; yRec = RecPosY[nIdx + recBase]; zRec = RecPosZ[nIdx + recBase]; } else{ xRec = rec.xPos[nIdx]; yRec = rec.yPos[nIdx]; zRec = rec.zPos[nIdx]; } REAL dx = posLigX - xRec; REAL dy = posLigY - yRec; REAL dz = posLigZ - zRec; const REAL dr2 = dx * dx + dy * dy + dz * dz; const REAL dPlateau2 = grid.dPlateau2; if ((dr2) > dPlateau2) { continue; } constexpr REAL one = static_cast<REAL>(1.0); const REAL dr2_inv = one/dr2; // inverse of dr2 // Scale distances dx *= dr2_inv; dy *= dr2_inv; dz *= dr2_inv; real3_t fVdW; REAL eVdW; const size_t atomTypeRec = rec.type[nIdx]; // calculate energy and potential/energy of LJ/VdW potential auto const params = table.getParams(atomTypeRec-1, atomTypeLig-1); LJPotForce(dr2, dr2_inv, dx, dy, dz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x += fVdW.x; fAcc.y += fVdW.y; fAcc.z += fVdW.z; eAcc += eVdW; const REAL chargeLig = lig.charge[LigAttrIdx]; const REAL chargeRec = rec.charge[nIdx]; const REAL chargeLigRec = chargeLig * chargeRec * simParam.ffelec; const bool calc_elec = abs(chargeLigRec) > 0.001; // evaluate electric potential REAL dPlateau2_inv = 1/grid.dPlateau2; const REAL ratio = sqrt(dr2*dPlateau2_inv); REAL rdx = ratio*dx; REAL rdy = ratio*dy; REAL rdz = ratio*dz; LJPotForce(dPlateau2,
dPlateau2_inv, rdx, rdy, rdz, params, one, table.shape, fVdW.x, fVdW.y, fVdW.z, eVdW); fAcc.x -= fVdW.x; fAcc.y -= fVdW.y; fAcc.z -= fVdW.z; eAcc -= eVdW; if (calc_elec) { REAL eEl; real3_t fEl; // calculate energy and potential/energy of charge potential ChargePotForce(dr2_inv, dx, dy, dz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x += fEl.x; fAcc.y += fEl.y; fAcc.z += fEl.z; eAcc += eEl; ChargePotForce(dPlateau2_inv, rdx, rdy, rdz, chargeLigRec, one, simParam.dielec, fEl.x, fEl.y, fEl.z, eEl); fAcc.x -= fEl.x; fAcc.y -= fEl.y; fAcc.z -= fEl.z; eAcc -= eEl; } } /* store results back to global memory */ if (nDesc.x > 0) { outLig_fx[i] += fAcc.x; outLig_fy[i] += fAcc.y; outLig_fz[i] += fAcc.z; outLigand_E[i] += eAcc; } } } // if (atomtype != 0) } } template<typename REAL, typename DOF_T> void d_NLPotForce( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<REAL>& grid, const d_Protein<REAL>& rec, const d_Protein<REAL>& lig, const d_ParamTable<REAL>& table, const SimParam<REAL>& simParam, const unsigned& numDOFs, const REAL* RecPosX, const REAL* RecPosY, const REAL* RecPosZ, const REAL* LigPosX, const REAL* LigPosY, const REAL* LigPosZ, REAL* outLig_fx, REAL* outLig_fy, REAL* outLig_fz, REAL* outLigand_E) { cudaVerifyKernel(( d_NLPotForce<REAL,DOF_T><<<gridSize, blockSize, 0, stream>>> ( grid, rec, lig, table, simParam, numDOFs, RecPosX, RecPosY, RecPosZ, LigPosX, LigPosY, LigPosZ, outLig_fx, outLig_fy, outLig_fz, outLigand_E ) )); } template void d_NLPotForce<float, DOF_6D_Modes<float>>( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<float>& grid, const d_Protein<float>& rec, const d_Protein<float>& lig, const d_ParamTable<float>& table, const SimParam<float>& simParam, const unsigned& numDOFs, const float* RecPosX, const float* RecPosY, const float* RecPosZ, const float* LigPosX, const float* LigPosY, const float* LigPosZ, float* outLig_fx, float* outLig_fy, float* outLig_fz, float* outLigand_E ); template void d_NLPotForce<double, DOF_6D_Modes<double>>( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<double>& grid, const d_Protein<double>& rec, const d_Protein<double>& lig, const d_ParamTable<double>& table, const SimParam<double>& simParam, const unsigned& numDOFs, const double* RecPosX, const double* RecPosY, const double* RecPosZ, const double* LigPosX, const double* LigPosY, const double* LigPosZ, double* outLig_fx, double* outLig_fy, double* outLig_fz, double* outLigand_E ); template void d_NLPotForce<float, DOF_6D<float>>( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<float>& grid, const d_Protein<float>& rec, const d_Protein<float>& lig, const d_ParamTable<float>& table, const SimParam<float>& simParam, const unsigned& numDOFs, const float* RecPosX, const float* RecPosY, const float* RecPosZ, const float* LigPosX, const float* LigPosY, const float* LigPosZ, float* outLig_fx, float* outLig_fy, float* outLig_fz, float* outLigand_E ); template void d_NLPotForce<double, DOF_6D<double>>( unsigned blockSize, unsigned gridSize, const cudaStream_t &stream, const d_NLGrid<double>& grid, const d_Protein<double>& rec, const d_Protein<double>& lig, const d_ParamTable<double>& table, const SimParam<double>& simParam, const unsigned& numDOFs, const double* RecPosX, const double* RecPosY, const double* RecPosZ, const double* LigPosX, const double* LigPosY, const double* LigPosZ, double* outLig_fx, double* outLig_fy, double* outLig_fz, 
double* outLigand_E ); } // namespace as
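// A standalone numeric sketch of the plateau-shift trick in d_NLPotForce above:
// the pair potential is evaluated a second time at the plateau distance and
// subtracted ('fAcc.x -= fVdW.x; ... eAcc -= eVdW'), so that energy and force
// fall to exactly zero at dPlateau2 and the neighbor-list cutoff introduces no
// discontinuity. The 12-6 Lennard-Jones form and the parameter values below are
// illustrative assumptions; the real functional form lives in forcefield.h
// (LJPotForce), which is not shown here.
#include <cmath>
#include <cstdio>

static double lj(double r, double eps, double sigma)
{
    const double sr6 = std::pow(sigma / r, 6.0);
    return 4.0 * eps * (sr6 * sr6 - sr6);  // plain 12-6 Lennard-Jones
}

int main()
{
    const double eps = 0.25, sigma = 1.0, rPlateau = 2.5;
    const double vShift = lj(rPlateau, eps, sigma);  // value subtracted by the kernel
    for (double r = 1.0; r <= rPlateau + 1e-9; r += 0.5) {
        printf("r = %.2f  V = % .6f  V_shifted = % .6f\n",
               r, lj(r, eps, sigma), lj(r, eps, sigma) - vShift);
    }
    // The last row (r == rPlateau) prints V_shifted == 0 by construction.
    return 0;
}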
1d8b9fa0deb04fc13de1c3c0e1861d8c8c6d546a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cudaDRectifier_propagate_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *x = NULL; hipMalloc(&x, XSIZE*YSIZE*sizeof(double)); double *y = NULL; hipMalloc(&y, XSIZE*YSIZE*sizeof(double)); unsigned int size = 1; double leakSlope = 1; int shifting = 1; double clipping = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cudaDRectifier_propagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,size,leakSlope,shifting,clipping); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cudaDRectifier_propagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,size,leakSlope,shifting,clipping); } hipDeviceSynchronize(); // drain the warm-up launches before timing auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cudaDRectifier_propagate_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,size,leakSlope,shifting,clipping); } hipDeviceSynchronize(); // make sure the timed launches have finished on the device auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1d8b9fa0deb04fc13de1c3c0e1861d8c8c6d546a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cudaDRectifier_propagate_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE*sizeof(double)); double *y = NULL; cudaMalloc(&y, XSIZE*YSIZE*sizeof(double)); unsigned int size = 1; double leakSlope = 1; int shifting = 1; double clipping = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cudaDRectifier_propagate_kernel<<<gridBlock,threadBlock>>>(x,y,size,leakSlope,shifting,clipping); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cudaDRectifier_propagate_kernel<<<gridBlock,threadBlock>>>(x,y,size,leakSlope,shifting,clipping); } cudaDeviceSynchronize(); // drain the warm-up launches before timing auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cudaDRectifier_propagate_kernel<<<gridBlock,threadBlock>>>(x,y,size,leakSlope,shifting,clipping); } cudaDeviceSynchronize(); // make sure the timed launches have finished on the device auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
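// A hedged alternative-timing sketch (not part of the generated harness above):
// host-side steady_clock timestamps only bracket kernel execution correctly when
// the device is synchronized first, whereas cudaEvents are timestamped in the
// GPU stream itself. The dummy kernel, sizes, and iteration count below are
// illustrative choices, not taken from the benchmark.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main()
{
    const int n = 1 << 20, iters = 1000;
    float *d_x = NULL;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                      // enqueued in the same stream as the kernels
    for (int i = 0; i < iters; ++i)
        dummy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                  // wait until 'stop' has actually executed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);      // elapsed GPU time in milliseconds
    printf("total %.3f ms, avg %.3f us per launch\n", ms, ms * 1000.0f / iters);

    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}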
dc860c7b798bec437b8d2190d5f44bf86a175f0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void incConst_gpu( const float *in, float *out, const int *variables) { float H; if (*variables & 1) { out[0] += *in; out[3] += *in; } if (*variables & 2) { H = out[0] > EPS_cuda ? out[0] : EPS_cuda; out[1] += *in * H; } if (*variables & 4) { H = out[0] > EPS_cuda ? out[0] : EPS_cuda; out[2] += *in * H; } if (*variables & 8) { out[3] += *in; } } // CUDA kernel function __global__ void op_cuda_incConst( const float *__restrict arg0, float *arg1, const int *arg2, int set_size ) { //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call incConst_gpu(arg0+n*1, arg1+n*4, arg2); } } //host stub function void op_par_loop_incConst(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2){ int*arg2h = (int *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(4); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[4].name = name; OP_kernels[4].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: incConst"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OP_consts_h + consts_bytes; arg2.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((int *)arg2.data)[d] = arg2h[d]; } consts_bytes += ROUND_UP(1*sizeof(int)); mvConstArraysToDevice(consts_bytes); //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_4 int nthread = OP_BLOCK_SIZE_4; #else int nthread = OP_block_size; #endif int nblocks = 200; hipLaunchKernelGGL(( op_cuda_incConst), dim3(nblocks),dim3(nthread), 0, 0, (float *) arg0.data_d, (float *) arg1.data_d, (int *) arg2.data_d, set->size ); } op_mpi_set_dirtybit_cuda(nargs, args); if (OP_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[4].time += wall_t2 - wall_t1; OP_kernels[4].transfer += (float)set->size * arg0.size; OP_kernels[4].transfer += (float)set->size * arg1.size * 2.0f; }
dc860c7b798bec437b8d2190d5f44bf86a175f0f.cu
// // auto-generated by op2.py // //user function __device__ void incConst_gpu( const float *in, float *out, const int *variables) { float H; if (*variables & 1) { out[0] += *in; out[3] += *in; } if (*variables & 2) { H = out[0] > EPS_cuda ? out[0] : EPS_cuda; out[1] += *in * H; } if (*variables & 4) { H = out[0] > EPS_cuda ? out[0] : EPS_cuda; out[2] += *in * H; } if (*variables & 8) { out[3] += *in; } } // CUDA kernel function __global__ void op_cuda_incConst( const float *__restrict arg0, float *arg1, const int *arg2, int set_size ) { //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call incConst_gpu(arg0+n*1, arg1+n*4, arg2); } } //host stub function void op_par_loop_incConst(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2){ int*arg2h = (int *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(4); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[4].name = name; OP_kernels[4].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: incConst"); } int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); if (set_size > 0) { //transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OP_consts_h + consts_bytes; arg2.data_d = OP_consts_d + consts_bytes; for ( int d=0; d<1; d++ ){ ((int *)arg2.data)[d] = arg2h[d]; } consts_bytes += ROUND_UP(1*sizeof(int)); mvConstArraysToDevice(consts_bytes); //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_4 int nthread = OP_BLOCK_SIZE_4; #else int nthread = OP_block_size; #endif int nblocks = 200; op_cuda_incConst<<<nblocks,nthread>>>( (float *) arg0.data_d, (float *) arg1.data_d, (int *) arg2.data_d, set->size ); } op_mpi_set_dirtybit_cuda(nargs, args); if (OP_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[4].time += wall_t2 - wall_t1; OP_kernels[4].transfer += (float)set->size * arg0.size; OP_kernels[4].transfer += (float)set->size * arg1.size * 2.0f; }
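// A standalone sketch (not from the OP2 sources) of the grid-stride loop used
// by op_cuda_incConst above: the generated code launches a fixed block count
// (nblocks = 200), and each thread strides by the total thread count so any
// set size is covered. The kernel and names below are illustrative.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_gridstride(float *data, int n, float factor)
{
    // Each thread handles elements tid, tid + stride, tid + 2*stride, ...
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
         i += blockDim.x * gridDim.x)
        data[i] *= factor;
}

int main()
{
    const int n = 1000000;
    float *d = NULL;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));
    scale_gridstride<<<200, 256>>>(d, n, 2.0f);   // 200 blocks regardless of n
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d);
    return 0;
}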
8158a781f70e08f7fd6eaf70a36bf3aff4fe1a28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* All modification made by Cambricon Corporation: © 2018 Cambricon Corporation All rights reserved. All other contributions: Copyright (c) 2014--2018, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include <vector> #include "caffe/layers/slice_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Slice(const int nthreads, const Dtype* in_data, const bool forward, const int num_slices, const int slice_size, const int bottom_slice_axis, const int top_slice_axis, const int offset_slice_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; const int bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; if (forward) { out_data[index] = in_data[bottom_index]; } else { out_data[bottom_index] = in_data[index]; } } } template <typename Dtype> void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(Slice<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; } } template <typename Dtype> void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0] || top.size() == 1) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(Slice<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_diff, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); offset_slice_axis += top_slice_axis; } } INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer); } // namespace caffe
8158a781f70e08f7fd6eaf70a36bf3aff4fe1a28.cu
/* All modification made by Cambricon Corporation: © 2018 Cambricon Corporation All rights reserved. All other contributions: Copyright (c) 2014--2018, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <vector> #include "caffe/layers/slice_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Slice(const int nthreads, const Dtype* in_data, const bool forward, const int num_slices, const int slice_size, const int bottom_slice_axis, const int top_slice_axis, const int offset_slice_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_slice_size = slice_size * top_slice_axis; const int slice_num = index / total_slice_size; const int slice_index = index % total_slice_size; const int bottom_index = slice_index + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; if (forward) { out_data[index] = in_data[bottom_index]; } else { out_data[bottom_index] = in_data[index]; } } } template <typename Dtype> void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom_data, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; } } template <typename Dtype> void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0] || 
top.size() == 1) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); const int top_slice_size = top_slice_axis * slice_size_; const int nthreads = top_slice_size * num_slices_; Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, top_diff, kForward, num_slices_, slice_size_, bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); offset_slice_axis += top_slice_axis; } } INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer); } // namespace caffe
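// A hedged CPU reference (not part of Caffe) for the index arithmetic in the
// Slice kernel above, useful as a unit-test oracle. A bottom blob whose slice
// axis has bottom_slice_axis entries is split into tops of top_slice_axis
// entries starting at offset_slice_axis; slice_size is the product of the
// dimensions after the slice axis and num_slices the product of those before it.
// The tiny 2x4 example in main() is an illustrative assumption.
#include <cstdio>

void slice_forward_cpu(const float *bottom, float *top, int num_slices,
                       int slice_size, int bottom_slice_axis,
                       int top_slice_axis, int offset_slice_axis)
{
    const int total_slice_size = slice_size * top_slice_axis;
    for (int index = 0; index < num_slices * total_slice_size; ++index) {
        const int slice_num   = index / total_slice_size;
        const int slice_index = index % total_slice_size;
        // Identical mapping to the CUDA_KERNEL_LOOP body in the kernel above.
        const int bottom_index = slice_index +
            (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
        top[index] = bottom[bottom_index];
    }
}

int main()
{
    // Bottom blob of shape 2x4 sliced along axis 1 into two 2x2 tops:
    // num_slices = 2, slice_size = 1, bottom_slice_axis = 4, top_slice_axis = 2.
    const float bottom[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    float top0[4], top1[4];
    slice_forward_cpu(bottom, top0, 2, 1, 4, 2, 0);  // -> {0, 1, 4, 5}
    slice_forward_cpu(bottom, top1, 2, 1, 4, 2, 2);  // -> {2, 3, 6, 7}
    for (int i = 0; i < 4; ++i) printf("%g %g\n", top0[i], top1[i]);
    return 0;
}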
d1690af039401f7cd95577b28bcb3f2bf607eb15.hip
// !!! This is a file automatically generated by hipify!!! /** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek) * @date 2012-2016 * @copyright University of Pennsylvania & Mauricio Mutai */ #include <cmath> #include <cstdio> #include <hip/hip_runtime.h> #include <thrust/random.h> #include <util/checkCUDAError.h> #include <util/tiny_gltf_loader.h> #include "rasterizeTools.h" #include "rasterize.h" #include <glm/gtc/quaternion.hpp> #include <glm/gtc/matrix_transform.hpp> #include <thrust/remove.h> #include <thrust/execution_policy.h> #define CUDA_MEASURE 0 #if CUDA_MEASURE == 0 // "undefine" cuda event things #define hipEventCreate(x) ((void)(0)) #define hipEventRecord(x) ((void)(0)) #define hipEventSynchronize(x) ((void)(0)) #define hipEventElapsedTime(x, y, z) ((void)(0)) #endif // CUDA_MEASURE == 0 #define PERSP_CORRECT 0 #define BILINEAR_INTERP 0 #define BACK_FACE_CULLING 1 #define SSAA_FACTOR 1 #if SSAA_FACTOR <= 0 #error SSAA_FACTOR must be > 0 #endif #define RENDER_FULL_TRIANGLE 0 #define RENDER_VERTICES 1 #define RENDER_EDGES 2 #define RENDER_MODE RENDER_FULL_TRIANGLE #define VERTEX_RENDER_SIZE 2 namespace { struct cudaMat4 { glm::vec4 x; glm::vec4 y; glm::vec4 z; glm::vec4 w; }; // LOOK: This is a custom function for multiplying cudaMat4 4x4 matrices with vectors. // This is a workaround for GLM matrix multiplication not working properly on pre-Fermi NVIDIA GPUs. // Multiplies a cudaMat4 matrix and a vec4 and returns the resulting vec4 __host__ __device__ glm::vec4 multiplyMV4(cudaMat4 m, glm::vec4 v) { glm::vec4 r(1, 1, 1, 1); r.x = (m.x.x*v.x) + (m.x.y*v.y) + (m.x.z*v.z) + (m.x.w*v.w); r.y = (m.y.x*v.x) + (m.y.y*v.y) + (m.y.z*v.z) + (m.y.w*v.w); r.z = (m.z.x*v.x) + (m.z.y*v.y) + (m.z.z*v.z) + (m.z.w*v.w); r.w = (m.w.x*v.x) + (m.w.y*v.y) + (m.w.z*v.z) + (m.w.w*v.w); return r; } __host__ __device__ cudaMat4 glmMat4ToCudaMat4(glm::mat4 a) { cudaMat4 m; a = glm::transpose(a); m.x = a[0]; m.y = a[1]; m.z = a[2]; m.w = a[3]; return m; } typedef unsigned short VertexIndex; typedef glm::vec3 VertexAttributePosition; typedef glm::vec3 VertexAttributeNormal; typedef glm::vec2 VertexAttributeTexcoord; typedef unsigned char TextureData; typedef unsigned char BufferByte; enum PrimitiveType{ Point = 1, Line = 2, Triangle = 3 }; struct VertexOut { glm::vec4 pos; // TODO: add new attributes to your VertexOut // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; // eye space normal used for shading, because the normal would be wrong after perspective transformation // glm::vec3 col; glm::vec2 texcoord0; TextureData* dev_diffuseTex = NULL; int texWidth, texHeight; // ... }; struct Primitive { PrimitiveType primitiveType = Triangle; // C++ 11 init VertexOut v[3]; }; struct Fragment { glm::vec3 color; glm::ivec2 screenCoord; float depth; // TODO: add new attributes to your Fragment // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; bool shouldShade = false; // VertexAttributeTexcoord texcoord0; // TextureData* dev_diffuseTex; // ...
}; struct PrimitiveDevBufPointers { int primitiveMode; //from tinygltfloader macro PrimitiveType primitiveType; int numPrimitives; int numIndices; int numVertices; // Vertex In, const after loaded VertexIndex* dev_indices; VertexAttributePosition* dev_position; VertexAttributeNormal* dev_normal; VertexAttributeTexcoord* dev_texcoord0; // Materials, add more attributes when needed TextureData* dev_diffuseTex; int diffuseTexWidth; int diffuseTexHeight; // TextureData* dev_specularTex; // TextureData* dev_normalTex; // ... // Vertex Out, vertex used for rasterization, this is changing every frame VertexOut* dev_verticesOut; // TODO: add more attributes when needed }; } struct shouldCull { __host__ __device__ bool operator()(const Primitive& prim) { return (glm::dot(glm::normalize(prim.v[0].eyePos), prim.v[0].eyeNor) > 0.0f); } }; static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap; static int width = 0; static int height = 0; static int totalNumPrimitives = 0; static Primitive *dev_primitives = NULL; static Fragment *dev_fragmentBuffer = NULL; static glm::vec3 *dev_framebuffer = NULL; static int * dev_depth = NULL; // you might need this buffer when doing depth test static float * dev_depthValues = NULL; // stores depth values static int * dev_depthLocks = NULL; // locks Z-buffer #if CUDA_MEASURE float vertProcTimeAcc = 0.0f; float primAsmTimeAcc = 0.0f; #if BACK_FACE_CULLING float cullTimeAcc = 0.0f; int cullCountAcc = 0; #endif float rastTimeAcc = 0.0f; float fragShaderTimeAcc = 0.0f; float copyToPBOTimeAcc = 0.0f; int measureCount = 0; #define MEASURE_COUNT_MAX 1000 #endif // CUDA_MEASURE /** * Kernel that writes the image to the OpenGL PBO directly. */ __global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (texel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } /** * Writes fragment colors to the framebuffer */ __global__ void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 baseColor; glm::vec3 eyePos; glm::vec3 eyeNor; bool shouldShade = false; #if SSAA_FACTOR > 1 for (int dx = 0; dx < SSAA_FACTOR; dx++) { for (int dy = 0; dy < SSAA_FACTOR; dy++) { const Fragment &frag = fragmentBuffer[x * SSAA_FACTOR + dx + (y * SSAA_FACTOR + dy) * w * SSAA_FACTOR]; baseColor += frag.color; eyePos += frag.eyePos; eyeNor += frag.eyeNor; shouldShade |= frag.shouldShade; } } baseColor /= float(SSAA_FACTOR * SSAA_FACTOR); //eyePos /= float(SSAA_FACTOR * SSAA_FACTOR); //eyeNor /= float(SSAA_FACTOR * SSAA_FACTOR); eyeNor = glm::normalize(eyeNor); #else baseColor = fragmentBuffer[index].color; eyePos = fragmentBuffer[index].eyePos; eyeNor = fragmentBuffer[index].eyeNor; shouldShade = fragmentBuffer[index].shouldShade; #endif // TODO: add your fragment shader code here if (shouldShade) { float lambert = glm::clamp(glm::dot(glm::normalize(-eyePos), eyeNor), 0.0f, 1.0f); framebuffer[index] = baseColor * lambert; }
else { framebuffer[index] = baseColor; } } } /** * Called once at the beginning of the program to allocate memory. */ void rasterizeInit(int w, int h) { width = w * SSAA_FACTOR; height = h * SSAA_FACTOR; printf("width: %d, height: %d\n", w, h); hipFree(dev_fragmentBuffer); hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment)); hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); hipFree(dev_framebuffer); hipMalloc(&dev_framebuffer, w * h * sizeof(glm::vec3)); hipMemset(dev_framebuffer, 0, w * h * sizeof(glm::vec3)); hipFree(dev_depth); hipMalloc(&dev_depth, width * height * sizeof(int)); hipFree(dev_depthValues); hipMalloc(&dev_depthValues, width * height * sizeof(float)); hipFree(dev_depthLocks); hipMalloc(&dev_depthLocks, width * height * sizeof(int)); checkCUDAError("rasterizeInit"); } __global__ void initDepth(int w, int h, int * depth) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); depth[index] = INT_MAX; } } __global__ void initDepthValues(int w, int h, float * depthValues) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); depthValues[index] = 2.0f; } } __global__ void initDepthLocks(int w, int h, int * depthLocks) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); depthLocks[index] = 0; } } /** * kernel function with support for stride, to sometimes replace hipMemcpy * One thread is responsible for copying one component */ __global__ void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) { // Attribute (vec3 position) // component (3 * float) // byte (4 * byte) // id of component int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { int count = i / n; int offset = i - count * n; // which component of the attribute for (int j = 0; j < componentTypeByteSize; j++) { dev_dst[count * componentTypeByteSize * n + offset * componentTypeByteSize + j] = dev_src[byteOffset + count * (byteStride == 0 ?
componentTypeByteSize * n : byteStride) + offset * componentTypeByteSize + j]; } } } __global__ void _nodeMatrixTransform( int numVertices, VertexAttributePosition* position, VertexAttributeNormal* normal, glm::mat4 MV, glm::mat3 MV_normal) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f)); normal[vid] = glm::normalize(MV_normal * normal[vid]); } } glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) { glm::mat4 curMatrix(1.0); const std::vector<double> &m = n.matrix; if (m.size() > 0) { // matrix, copy it for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { curMatrix[i][j] = (float)m.at(4 * i + j); } } } else { // no matrix, use rotation, scale, translation if (n.translation.size() > 0) { curMatrix[3][0] = n.translation[0]; curMatrix[3][1] = n.translation[1]; curMatrix[3][2] = n.translation[2]; } if (n.rotation.size() > 0) { glm::mat4 R; glm::quat q; q[0] = n.rotation[0]; q[1] = n.rotation[1]; q[2] = n.rotation[2]; q[3] = n.rotation[3]; // glTF stores rotations as (x, y, z, w) R = glm::mat4_cast(q); curMatrix = curMatrix * R; } if (n.scale.size() > 0) { curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2])); } } return curMatrix; } void traverseNode ( std::map<std::string, glm::mat4> & n2m, const tinygltf::Scene & scene, const std::string & nodeString, const glm::mat4 & parentMatrix ) { const tinygltf::Node & n = scene.nodes.at(nodeString); glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n); n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M)); auto it = n.children.begin(); auto itEnd = n.children.end(); for (; it != itEnd; ++it) { traverseNode(n2m, scene, *it, M); } } void rasterizeSetBuffers(const tinygltf::Scene & scene) { totalNumPrimitives = 0; std::map<std::string, BufferByte*> bufferViewDevPointers; // 1. copy all `bufferViews` to device memory { std::map<std::string, tinygltf::BufferView>::const_iterator it( scene.bufferViews.begin()); std::map<std::string, tinygltf::BufferView>::const_iterator itEnd( scene.bufferViews.end()); for (; it != itEnd; it++) { const std::string key = it->first; const tinygltf::BufferView &bufferView = it->second; if (bufferView.target == 0) { continue; // Unsupported bufferView. } const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer); BufferByte* dev_bufferView; hipMalloc(&dev_bufferView, bufferView.byteLength); hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice); checkCUDAError("Set BufferView Device Mem"); bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView)); } } // 2.
for each mesh: // for each primitive: // build device buffers of indices, material, and each attribute // and store these pointers in a map { std::map<std::string, glm::mat4> nodeString2Matrix; auto rootNodeNamesList = scene.scenes.at(scene.defaultScene); { auto it = rootNodeNamesList.begin(); auto itEnd = rootNodeNamesList.end(); for (; it != itEnd; ++it) { traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f)); } } // parse through node to access mesh auto itNode = nodeString2Matrix.begin(); auto itEndNode = nodeString2Matrix.end(); for (; itNode != itEndNode; ++itNode) { const tinygltf::Node & N = scene.nodes.at(itNode->first); const glm::mat4 & matrix = itNode->second; const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix))); auto itMeshName = N.meshes.begin(); auto itEndMeshName = N.meshes.end(); for (; itMeshName != itEndMeshName; ++itMeshName) { const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName); auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>())); std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second; // for each primitive for (size_t i = 0; i < mesh.primitives.size(); i++) { const tinygltf::Primitive &primitive = mesh.primitives[i]; if (primitive.indices.empty()) return; // TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes VertexIndex* dev_indices = NULL; VertexAttributePosition* dev_position = NULL; VertexAttributeNormal* dev_normal = NULL; VertexAttributeTexcoord* dev_texcoord0 = NULL; // ----------Indices------------- const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices); const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView); BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView); // assume type is SCALAR for indices int n = 1; int numIndices = indexAccessor.count; int componentTypeByteSize = sizeof(VertexIndex); int byteLength = numIndices * n * componentTypeByteSize; dim3 numThreadsPerBlock(128); dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); hipMalloc(&dev_indices, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( numIndices, (BufferByte*)dev_indices, dev_bufferView, n, indexAccessor.byteStride, indexAccessor.byteOffset, componentTypeByteSize); checkCUDAError("Set Index Buffer"); // ---------Primitive Info------- // Warning: LINE_STRIP is not supported in tinygltfloader int numPrimitives; PrimitiveType primitiveType; switch (primitive.mode) { case TINYGLTF_MODE_TRIANGLES: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices / 3; break; case TINYGLTF_MODE_TRIANGLE_STRIP: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_TRIANGLE_FAN: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_LINE: primitiveType = PrimitiveType::Line; numPrimitives = numIndices / 2; break; case TINYGLTF_MODE_LINE_LOOP: primitiveType = PrimitiveType::Line; numPrimitives = numIndices + 1; break; case TINYGLTF_MODE_POINTS: primitiveType = PrimitiveType::Point; numPrimitives = numIndices; break; default: // output error break; }; // ----------Attributes------------- auto it(primitive.attributes.begin()); auto itEnd(primitive.attributes.end()); int numVertices = 0; // for each attribute for (; it != itEnd; it++) { const tinygltf::Accessor &accessor =
scene.accessors.at(it->second); const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView); int n = 1; if (accessor.type == TINYGLTF_TYPE_SCALAR) { n = 1; } else if (accessor.type == TINYGLTF_TYPE_VEC2) { n = 2; } else if (accessor.type == TINYGLTF_TYPE_VEC3) { n = 3; } else if (accessor.type == TINYGLTF_TYPE_VEC4) { n = 4; } BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView); BufferByte ** dev_attribute = NULL; numVertices = accessor.count; int componentTypeByteSize; // Note: since the type of our attribute array (dev_position) is static (float32) // We assume the glTF model attribute types are 5126 (FLOAT) here if (it->first.compare("POSITION") == 0) { componentTypeByteSize = sizeof(VertexAttributePosition) / n; dev_attribute = (BufferByte**)&dev_position; } else if (it->first.compare("NORMAL") == 0) { componentTypeByteSize = sizeof(VertexAttributeNormal) / n; dev_attribute = (BufferByte**)&dev_normal; } else if (it->first.compare("TEXCOORD_0") == 0) { componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n; dev_attribute = (BufferByte**)&dev_texcoord0; } std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n'; dim3 numThreadsPerBlock(128); dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); int byteLength = numVertices * n * componentTypeByteSize; hipMalloc(dev_attribute, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( n * numVertices, *dev_attribute, dev_bufferView, n, accessor.byteStride, accessor.byteOffset, componentTypeByteSize); std::string msg = "Set Attribute Buffer: " + it->first; checkCUDAError(msg.c_str()); } // malloc for VertexOut VertexOut* dev_vertexOut; hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut)); checkCUDAError("Malloc VertexOut Buffer"); // ----------Materials------------- // You can only worry about this part once you start to // implement textures for your rasterizer TextureData* dev_diffuseTex = NULL; int diffuseTexWidth = 0; int diffuseTexHeight = 0; if (!primitive.material.empty()) { const tinygltf::Material &mat = scene.materials.at(primitive.material); printf("material.name = %s\n", mat.name.c_str()); if (mat.values.find("diffuse") != mat.values.end()) { std::string diffuseTexName = mat.values.at("diffuse").string_value; if (scene.textures.find(diffuseTexName) != scene.textures.end()) { const tinygltf::Texture &tex = scene.textures.at(diffuseTexName); if (scene.images.find(tex.source) != scene.images.end()) { const tinygltf::Image &image = scene.images.at(tex.source); size_t s = image.image.size() * sizeof(TextureData); hipMalloc(&dev_diffuseTex, s); hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice); diffuseTexWidth = image.width; diffuseTexHeight = image.height; checkCUDAError("Set Texture Image data"); } } } // TODO: write your code for other materials // You may have to take a look at tinygltfloader // You can also use the above code loading diffuse material as a starting point } // ---------Node hierarchy transform-------- hipDeviceSynchronize(); dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); _nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > ( numVertices, dev_position, dev_normal, matrix, matrixNormal); checkCUDAError("Node hierarchy transformation"); // at the end of the for loop of primitive // push dev pointers to map primitiveVector.push_back(PrimitiveDevBufPointers{ primitive.mode, primitiveType, numPrimitives,
numIndices, numVertices, dev_indices, dev_position, dev_normal, dev_texcoord0, dev_diffuseTex, diffuseTexWidth, diffuseTexHeight, dev_vertexOut //VertexOut }); totalNumPrimitives += numPrimitives; } // for each primitive } // for each mesh } // for each node } // 3. Malloc for dev_primitives { hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive)); } // Finally, hipFree raw dev_bufferViews { std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin()); std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end()); //bufferViewDevPointers for (; it != itEnd; it++) { hipFree(it->second); } checkCUDAError("Free BufferView Device Mem"); } } __global__ void _vertexTransformAndAssembly( int numVertices, PrimitiveDevBufPointers primitive, glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal, int width, int height) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { // TODO: Apply vertex transformation here glm::vec4 pos = glm::vec4(primitive.dev_position[vid], 1.0f); glm::vec3 nor = primitive.dev_normal[vid]; glm::vec3 eyePos = glm::vec3(MV * pos); // Multiply the MVP matrix for each vertex position, this will transform everything into clipping space pos = MVP * pos;//multiplyMV4(glmMat4ToCudaMat4(MVP), pos); nor = glm::normalize(MV_normal * nor); // Then divide the pos by its w element to transform into NDC space pos /= pos.w; // Finally transform x and y to viewport space pos.x = (pos.x + 1.0) * 0.5f * width; pos.y = (1.0f - pos.y) * 0.5f * height; // TODO: Apply vertex assembly here // Assemble all attribute arrays into the primitive array primitive.dev_verticesOut[vid].pos = pos; primitive.dev_verticesOut[vid].eyeNor = nor; primitive.dev_verticesOut[vid].eyePos = eyePos; primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex; if (primitive.dev_texcoord0 != NULL) { primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth; primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight; primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid]; } } } static int curPrimitiveBeginId = 0; __global__ void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) { // index id int iid = (blockIdx.x * blockDim.x) + threadIdx.x; if (iid < numIndices) { // This is primitive assembly for triangles int pid; // id for cur primitives vector if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) { pid = iid / (int)primitive.primitiveType; dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType] = primitive.dev_verticesOut[primitive.dev_indices[iid]]; } // TODO: other primitive types (point, line) } } __device__ float triArea(const glm::vec3& pt0, const glm::vec3& pt1, const glm::vec3& pt2) { // don't divide by 2 because all calls should use this and we aren't concerned // about the actual value of the area, just the relative values return glm::length(glm::cross(pt0 - pt1, pt0 - pt2)); } __device__ bool isInTriangle(glm::vec3* triPoints, const glm::vec3& pt, float totalArea, float* baryWeights) { baryWeights[2] = triArea(pt, triPoints[0], triPoints[1]); baryWeights[0] = triArea(pt, triPoints[1], triPoints[2]); baryWeights[1] = triArea(pt, triPoints[0], triPoints[2]); return (baryWeights[0] + baryWeights[1] + baryWeights[2]) <= totalArea; } #if BILINEAR_INTERP __device__ glm::vec3 colorFromUV(TextureData* texture, glm::vec2
texCoord, int texWidth, int texHeight) { glm::vec2 scaledTexCoord = texCoord * glm::vec2(texWidth, texHeight); glm::ivec2 intScaledTexCoord = glm::ivec2(scaledTexCoord); glm::ivec2 nextScaledTexCoord = glm::clamp(intScaledTexCoord + glm::ivec2(1), glm::ivec2(0), glm::ivec2(texWidth - 1, texHeight - 1)); int idx = intScaledTexCoord.x + intScaledTexCoord.y * texWidth; glm::vec3 col00 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); idx = nextScaledTexCoord.x + intScaledTexCoord.y * texWidth; glm::vec3 col10 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); idx = intScaledTexCoord.x + nextScaledTexCoord.y * texWidth; glm::vec3 col01 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); idx = nextScaledTexCoord.x + nextScaledTexCoord.y * texWidth; glm::vec3 col11 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); glm::vec2 diff = scaledTexCoord - glm::vec2(intScaledTexCoord); return (1.0f - diff.x) * (1.0f - diff.y) * col00 + diff.x * (1.0f - diff.y) * col10 + (1.0f - diff.x) * diff.y * col01 + diff.x * diff.y * col11; } #else __device__ glm::vec3 colorFromUV(TextureData* texture, glm::vec2 texCoord, int texWidth, int texHeight) { int idx = (int)(texCoord.x * texWidth) + (int)(texCoord.y * texHeight) * texWidth; glm::vec3 col = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); return col; } #endif __global__ void rast(Primitive* dev_primitives, int primitivesCount, int w, int h, Fragment *fragmentBuffer, float *dev_depthValues, int *dev_depthLocks) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (idx < primitivesCount) { Primitive& prim = dev_primitives[idx]; // assume triangle glm::vec2 bboxMin = glm::max(glm::vec2(0.0f), glm::min(glm::vec2(prim.v[0].pos), glm::min(glm::vec2(prim.v[1].pos), glm::vec2(prim.v[2].pos)))); glm::vec2 bboxMax = glm::min(glm::vec2((float)(w - 1), (float)(h - 1)), glm::max(glm::vec2(prim.v[0].pos), glm::max(glm::vec2(prim.v[1].pos), glm::vec2(prim.v[2].pos)))); bboxMin = glm::floor(bboxMin); bboxMax = glm::ceil(bboxMax); glm::vec3 triPoints[3]; triPoints[0] = glm::vec3(prim.v[0].pos); //triPoints[0].z = 0.0f; triPoints[1] = glm::vec3(prim.v[1].pos); //triPoints[1].z = 0.0f; triPoints[2] = glm::vec3(prim.v[2].pos); //triPoints[2].z = 0.0f; // make totalArea slightly larger to reduce "shadow acne" due to FP error float totalArea = triArea(triPoints[0], triPoints[1], triPoints[2]) * 1.0001f; float baryWeights[3]; glm::vec3 baryCoords; bool hasTexture = prim.v[0].dev_diffuseTex != NULL; #if RENDER_MODE == RENDER_FULL_TRIANGLE for (float y = bboxMin.y; y <= bboxMax.y; y += 1.0f) { for (float x = bboxMin.x; x <= bboxMax.x; x += 1.0f) { baryCoords = calculateBarycentricCoordinate(triPoints, glm::vec2(x, y)); if (isBarycentricCoordInBounds(baryCoords)) { // TODO: persp-correct baryWeights[0] = baryCoords[0]; baryWeights[1] = baryCoords[1]; baryWeights[2] = baryCoords[2]; #if PERSP_CORRECT float z = 1.0f / (baryWeights[0] / prim.v[0].eyePos.z + baryWeights[1] / prim.v[1].eyePos.z + baryWeights[2] / prim.v[2].eyePos.z); baryWeights[0] *= z / prim.v[0].eyePos.z; baryWeights[1] *= z / prim.v[1].eyePos.z; baryWeights[2] *= z / prim.v[2].eyePos.z; z = -getZAtCoordinate(baryCoords, triPoints); #else float z = -getZAtCoordinate(baryCoords, triPoints); #endif // depth check // lock this fragment on the depth buffer Fragment 
frag; glm::vec3 nor = baryWeights[0] * prim.v[0].eyeNor + baryWeights[1] * prim.v[1].eyeNor + baryWeights[2] * prim.v[2].eyeNor; // texture mapping if (hasTexture) { glm::vec2 texCoord = baryWeights[0] * prim.v[0].texcoord0 + baryWeights[1] * prim.v[1].texcoord0 + baryWeights[2] * prim.v[2].texcoord0; // check if UV are in range (may not be if Z value is weird) if (texCoord.x < 0.0f || texCoord.x > 1.0f || texCoord.y < 0.0f || texCoord.y > 1.0f) { continue; } frag.shouldShade = true; frag.color = colorFromUV(prim.v[0].dev_diffuseTex, texCoord, prim.v[0].texWidth, prim.v[0].texHeight); } else { // color using normal nor = glm::normalize(nor); // check if coords are in range (may not be if Z value is weird) if (nor.x < -1.0f || nor.x > 1.0f || nor.y < -1.0f || nor.y > 1.0f || nor.z < -1.0f || nor.z > 1.0f) { continue; } frag.shouldShade = false; frag.color = glm::abs(nor); } frag.eyeNor = nor; frag.eyePos = baryWeights[0] * prim.v[0].eyePos + baryWeights[1] * prim.v[1].eyePos + baryWeights[2] * prim.v[2].eyePos; frag.screenCoord = glm::ivec2((int)x, (int)y); frag.depth = z; //frag.color = glm::vec3(z); // add fragment int fragIdx = (int)x + (int)y * w; // magic to make mutex work bool isSet; do { isSet = (atomicCAS(&dev_depthLocks[fragIdx], 0, 1) == 0); if (isSet) { // critical section if (z < dev_depthValues[fragIdx]) { dev_depthValues[fragIdx] = z; fragmentBuffer[fragIdx] = frag; } // unlock fragment dev_depthLocks[fragIdx] = 0;//atomicExch(&dev_depthLocks[fragIdx], 0); } } while (!isSet); } } } #elif RENDER_MODE == RENDER_VERTICES for (int i = 0; i < 3; i++) { const glm::vec3& vert = triPoints[i]; int x = (int)vert.x; int y = (int)vert.y; if (x >= w || y >= h || x < 0 || y < 0) { continue; } Fragment frag; if (hasTexture) { frag.shouldShade = true; frag.color = colorFromUV(prim.v[0].dev_diffuseTex, prim.v[i].texcoord0, prim.v[i].texWidth, prim.v[i].texHeight); } else { frag.shouldShade = false; frag.color = glm::abs(prim.v[i].eyeNor); } frag.eyePos = prim.v[i].eyePos; frag.eyeNor = prim.v[i].eyeNor; float z = vert.z; int xLimit = glm::clamp(x + VERTEX_RENDER_SIZE, x, w - 1); int yLimit = glm::clamp(y + VERTEX_RENDER_SIZE, y, h - 1); for (int fragX = x; fragX <= xLimit; fragX++) { for (int fragY = y; fragY <= yLimit; fragY++) { int fragIdx = fragX + fragY * w; bool isSet; do { isSet = true;// (atomicCAS(&dev_depthLocks[fragIdx], 0, 1) == 0); if (isSet) { // critical section if (z < dev_depthValues[fragIdx]) { dev_depthValues[fragIdx] = z; fragmentBuffer[fragIdx] = frag; } // unlock fragment dev_depthLocks[fragIdx] = 0;//atomicExch(&dev_depthLocks[fragIdx], 0); } } while (!isSet); } } } #elif RENDER_MODE == RENDER_EDGES // Bresenham's line algorithm Fragment frag; frag.shouldShade = false; frag.color = glm::vec3(1.0f); for (int i = 0; i < 3; i++) { glm::ivec2 leftVert; glm::ivec2 rightVert; int nextIdx = i == 2 ? 
0 : i + 1; if ((int)triPoints[i].x < 0 || (int)triPoints[i].x >= w || (int)triPoints[nextIdx].x < 0 || (int)triPoints[nextIdx].x >= w || (int)triPoints[i].y < 0 || (int)triPoints[i].y >= h || (int)triPoints[nextIdx].y < 0 || (int)triPoints[nextIdx].y >= h) { continue; } if ((int)triPoints[i].x == (int)triPoints[nextIdx].x) { // vertical line int yStart = glm::max(0, (int)glm::min(triPoints[i].y, triPoints[nextIdx].y)); int yEnd = glm::min(h - 1, (int)glm::max(triPoints[i].y, triPoints[nextIdx].y)); int x = (int)triPoints[i].x; // draw(x,y) for (int y = yStart; y <= yEnd; y++) { int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; } continue; } else if ((int)triPoints[i].y == (int)triPoints[nextIdx].y) { // horizontal line int xStart = glm::max(0, (int)glm::min(triPoints[i].x, triPoints[nextIdx].x)); int xEnd = glm::min(w - 1, (int)glm::max(triPoints[i].x, triPoints[nextIdx].x)); int y = (int)triPoints[i].y; // draw(x,y) for (int x = xStart; x <= xEnd; x++) { int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; } continue; } else if ((int)triPoints[i].x > (int)triPoints[nextIdx].x) { rightVert = glm::ivec2(triPoints[i]); leftVert = glm::ivec2(triPoints[nextIdx]); } else { leftVert = glm::ivec2(triPoints[i]); rightVert = glm::ivec2(triPoints[nextIdx]); } float dErr = abs((triPoints[i].y - triPoints[nextIdx].y) / (triPoints[i].x - triPoints[nextIdx].x)); bool downward = ((rightVert.y - leftVert.y) < 0); int increment = downward ? -1 : 1; float accErr = 0.0f; int xStart = glm::max(leftVert.x, 0); int y = leftVert.y; int xEnd = glm::min(rightVert.x, w - 1); for (int x = xStart; x <= xEnd; x++) { // draw(x,y) if (y < 0 || y >= h) { break; } int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; accErr += dErr; while (accErr >= 0.5f) { y += increment; // draw(x, y) if (y < 0 || y >= h) { break; } if ((downward && y == rightVert.y - 1) || (!downward && y == rightVert.y + 1)) { y -= increment; } int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; accErr -= 1.0f; } } } #endif // RENDER_MODE } } /** * Perform rasterization. */ void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockCount2d((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); // for frame buffer; will be different is SSAA_FACTOR > 1 dim3 frameBufferBlockCount2d((width / SSAA_FACTOR - 1) / blockSize2d.x + 1, (height / SSAA_FACTOR - 1) / blockSize2d.y + 1); // set up CUDA timing events hipEvent_t stageStart, stageEnd; hipEventCreate(&stageStart); hipEventCreate(&stageEnd); float vertProcTime = 0.0f; float primAsmTime = 0.0f; #if BACK_FACE_CULLING float cullTime; #endif float rastTime; float fragShaderTime; float copyToPBOTime; // Execute your rasterization pipeline here // (See README for rasterization pipeline outline.) 
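// Stages executed below, in order: vertex transform & assembly ->
	// primitive assembly -> clear fragment/depth/lock buffers ->
	// optional back-face culling (thrust::remove_if with shouldCull) ->
	// per-primitive rasterization with a depth-locked Z test (rast) ->
	// fragment shading (render) -> copy to the OpenGL PBO (sendImageToPBO).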
// Vertex Process & primitive assembly { curPrimitiveBeginId = 0; dim3 numThreadsPerBlock(128); auto it = mesh2PrimitivesMap.begin(); auto itEnd = mesh2PrimitivesMap.end(); for (; it != itEnd; ++it) { auto p = (it->second).begin(); // each primitive auto pEnd = (it->second).end(); for (; p != pEnd; ++p) { float measurement; dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); checkCUDAError("pre-Vertex Processing"); hipEventRecord(stageStart); _vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height); hipEventRecord(stageEnd); checkCUDAError("post-Vertex Processing"); hipDeviceSynchronize(); hipEventSynchronize(stageEnd); hipEventElapsedTime(&measurement, stageStart, stageEnd); vertProcTime += measurement; hipEventRecord(stageStart); _primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> > (p->numIndices, curPrimitiveBeginId, dev_primitives, *p); hipEventRecord(stageEnd); checkCUDAError("Primitive Assembly"); hipEventSynchronize(stageEnd); hipEventElapsedTime(&measurement, stageStart, stageEnd); primAsmTime += measurement; curPrimitiveBeginId += p->numPrimitives; } } checkCUDAError("Vertex Processing and Primitive Assembly"); } hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); initDepthValues << <blockCount2d, blockSize2d >> >(width, height, dev_depthValues); hipMemset(dev_depthLocks, 0, width * height * sizeof(int)); // test screen-space tri #if 0 Primitive prim; prim.v[0].pos = glm::vec4(100.0f, 100.0f, 0.0f, 1.0f); prim.v[1].pos = glm::vec4(800.0f, 100.0f, 0.0f, 1.0f); prim.v[2].pos = glm::vec4(200.0f, 800.0f, 0.0f, 1.0f); hipMemcpy(dev_primitives, &prim, sizeof(Primitive), hipMemcpyHostToDevice); curPrimitiveBeginId = 1; #endif // back-face culling #if BACK_FACE_CULLING hipEventRecord(stageStart); Primitive *newEnd = thrust::remove_if(thrust::device, dev_primitives, dev_primitives + curPrimitiveBeginId, shouldCull()); #if CUDA_MEASURE cullCountAcc += (dev_primitives + curPrimitiveBeginId) - newEnd; #endif //printf("culled: %d", (dev_primitives + curPrimitiveBeginId) - newEnd); curPrimitiveBeginId = newEnd - dev_primitives; hipEventRecord(stageEnd); hipEventSynchronize(stageEnd); hipEventElapsedTime(&cullTime, stageStart, stageEnd); #endif // TODO: rasterize checkCUDAError("pre-actual rasterizer"); hipEventRecord(stageStart); rast << <dim3(curPrimitiveBeginId / 32 + 1), dim3(32) >> > (dev_primitives, curPrimitiveBeginId, width, height, dev_fragmentBuffer, dev_depthValues, dev_depthLocks); hipEventRecord(stageEnd); hipEventSynchronize(stageEnd); hipEventElapsedTime(&rastTime, stageStart, stageEnd); checkCUDAError("post-actual rasterizer"); #if 0 float *buf = (float *)malloc(width * height * sizeof(float)); hipMemcpy(buf, dev_depthValues, width * height * sizeof(float), hipMemcpyDeviceToHost); int ct = 0; for (int i = 0; i < width * height; i++) { if (buf[i] <= 0.0f) { printf("%.3f ", buf[i]); ct++; } } printf("ct: %d", ct); printf("\n"); free(buf); while (1); #endif // Copy depthbuffer colors into framebuffer hipEventRecord(stageStart); render << <frameBufferBlockCount2d, blockSize2d >> >(width / SSAA_FACTOR, height / SSAA_FACTOR, dev_fragmentBuffer, dev_framebuffer); hipEventRecord(stageEnd); hipEventSynchronize(stageEnd); hipEventElapsedTime(&fragShaderTime, stageStart, stageEnd); checkCUDAError("fragment shader"); hipEventRecord(stageStart); // 
Copy framebuffer into OpenGL buffer for OpenGL previewing hipLaunchKernelGGL(( sendImageToPBO), dim3(frameBufferBlockCount2d), dim3(blockSize2d) , 0, 0, pbo, width / SSAA_FACTOR, height / SSAA_FACTOR, dev_framebuffer); hipEventRecord(stageEnd); hipEventSynchronize(stageEnd); hipEventElapsedTime(&copyToPBOTime, stageStart, stageEnd); checkCUDAError("copy render result to pbo"); #if CUDA_MEASURE measureCount++; vertProcTimeAcc += vertProcTime; primAsmTimeAcc += primAsmTime; #if BACK_FACE_CULLING cullTimeAcc += cullTime; #endif rastTimeAcc += rastTime; fragShaderTimeAcc += fragShaderTime; copyToPBOTimeAcc += copyToPBOTime; if (measureCount >= MEASURE_COUNT_MAX) { // print measurements printf("Vertex Processing: %.4f\n", vertProcTimeAcc / (float)measureCount); printf("Primitive Assembly: %.4f\n", primAsmTimeAcc / (float)measureCount); #if BACK_FACE_CULLING printf("Back-face Culling: %.4f\n", cullTimeAcc / (float)measureCount); printf("Faces Culled: %.4f\n", (float)(cullCountAcc) / (float)measureCount); #endif printf("Rasterizer: %.4f\n", rastTimeAcc / (float)measureCount); printf("Fragment Shader: %.4f\n", fragShaderTimeAcc / (float)measureCount); printf("Copy to PBO: %.4f\n", copyToPBOTimeAcc / (float)measureCount); printf("\n"); measureCount = 0; vertProcTimeAcc = 0.0f; primAsmTimeAcc = 0.0f; #if BACK_FACE_CULLING cullTimeAcc = 0.0f; cullCountAcc = 0; #endif rastTimeAcc = 0.0f; fragShaderTimeAcc = 0.0f; copyToPBOTimeAcc = 0.0f; } #endif } /** * Called once at the end of the program to free CUDA memory. */ void rasterizeFree() { // deconstruct primitives attribute/indices device buffer auto it(mesh2PrimitivesMap.begin()); auto itEnd(mesh2PrimitivesMap.end()); for (; it != itEnd; ++it) { for (auto p = it->second.begin(); p != it->second.end(); ++p) { hipFree(p->dev_indices); hipFree(p->dev_position); hipFree(p->dev_normal); hipFree(p->dev_texcoord0); hipFree(p->dev_diffuseTex); hipFree(p->dev_verticesOut); //TODO: release other attributes and materials } } //////////// hipFree(dev_primitives); dev_primitives = NULL; hipFree(dev_fragmentBuffer); dev_fragmentBuffer = NULL; hipFree(dev_framebuffer); dev_framebuffer = NULL; hipFree(dev_depth); dev_depth = NULL; hipFree(dev_depthValues); dev_depthValues = NULL; hipFree(dev_depthLocks); dev_depthLocks = NULL; checkCUDAError("rasterize Free"); }
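// --- Sketch (not part of the original file): the per-pixel mutex that
// rast() uses for its depth test, isolated into a minimal kernel.
// depthTestSketch is a hypothetical name; it assumes one candidate
// fragment per thread, a float depth buffer initialized to a far value,
// and an int lock array zeroed before launch. Keeping the critical
// section inside the success branch of the loop (rather than spinning on
// atomicCAS directly) avoids intra-warp deadlock on lock-step hardware.
__global__ void depthTestSketch(int numFrags,
	const int* fragPixel, const float* fragZ,
	float* depthBuffer, int* locks) {
	int i = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (i >= numFrags) return;
	int px = fragPixel[i];
	float z = fragZ[i];
	bool done = false;
	while (!done) {
		// Try to acquire the per-pixel lock (0 -> 1).
		if (atomicCAS(&locks[px], 0, 1) == 0) {
			if (z < depthBuffer[px]) {     // keep the nearest fragment
				depthBuffer[px] = z;
			}
			__threadfence();               // publish the write
			atomicExch(&locks[px], 0);     // release the lock
			done = true;
		}
	}
}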
d1690af039401f7cd95577b28bcb3f2bf607eb15.cu
/** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek) * @date 2012-2016 * @copyright University of Pennsylvania & Mauricio Mutai */ #include <cmath> #include <cstdio> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/random.h> #include <util/checkCUDAError.h> #include <util/tiny_gltf_loader.h> #include "rasterizeTools.h" #include "rasterize.h" #include <glm/gtc/quaternion.hpp> #include <glm/gtc/matrix_transform.hpp> #include <thrust/remove.h> #include <thrust/execution_policy.h> #define CUDA_MEASURE 0 #if CUDA_MEASURE == 0 // "undefine" cuda event things #define cudaEventCreate(x) ((void)(0)) #define cudaEventRecord(x) ((void)(0)) #define cudaEventSynchronize(x) ((void)(0)) #define cudaEventElapsedTime(x, y, z) ((void)(0)) #endif // CUDA_MEASURE == 0 #define PERSP_CORRECT 0 #define BILINEAR_INTERP 0 #define BACK_FACE_CULLING 1 #define SSAA_FACTOR 1 #if SSAA_FACTOR <= 0 #error SSAA_FACTOR must be > 0 #endif #define RENDER_FULL_TRIANGLE 0 #define RENDER_VERTICES 1 #define RENDER_EDGES 2 #define RENDER_MODE RENDER_FULL_TRIANGLE #define VERTEX_RENDER_SIZE 2 namespace { struct cudaMat4 { glm::vec4 x; glm::vec4 y; glm::vec4 z; glm::vec4 w; }; // LOOK: This is a custom function for multiplying cudaMat4 4x4 matrixes with vectors. // This is a workaround for GLM matrix multiplication not working properly on pre-Fermi NVIDIA GPUs. // Multiplies a cudaMat4 matrix and a vec4 and returns a vec3 clipped from the vec4 __host__ __device__ glm::vec4 multiplyMV4(cudaMat4 m, glm::vec4 v) { glm::vec4 r(1, 1, 1, 1); r.x = (m.x.x*v.x) + (m.x.y*v.y) + (m.x.z*v.z) + (m.x.w*v.w); r.y = (m.y.x*v.x) + (m.y.y*v.y) + (m.y.z*v.z) + (m.y.w*v.w); r.z = (m.z.x*v.x) + (m.z.y*v.y) + (m.z.z*v.z) + (m.z.w*v.w); r.w = (m.w.x*v.x) + (m.w.y*v.y) + (m.w.z*v.z) + (m.w.w*v.w); return r; } __host__ __device__ cudaMat4 glmMat4ToCudaMat4(glm::mat4 a) { cudaMat4 m; a = glm::transpose(a); m.x = a[0]; m.y = a[1]; m.z = a[2]; m.w = a[3]; return m; } typedef unsigned short VertexIndex; typedef glm::vec3 VertexAttributePosition; typedef glm::vec3 VertexAttributeNormal; typedef glm::vec2 VertexAttributeTexcoord; typedef unsigned char TextureData; typedef unsigned char BufferByte; enum PrimitiveType{ Point = 1, Line = 2, Triangle = 3 }; struct VertexOut { glm::vec4 pos; // TODO: add new attributes to your VertexOut // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation // glm::vec3 col; glm::vec2 texcoord0; TextureData* dev_diffuseTex = NULL; int texWidth, texHeight; // ... }; struct Primitive { PrimitiveType primitiveType = Triangle; // C++ 11 init VertexOut v[3]; }; struct Fragment { glm::vec3 color; glm::ivec2 screenCoord; float depth; // TODO: add new attributes to your Fragment // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; bool shouldShade = false; // VertexAttributeTexcoord texcoord0; // TextureData* dev_diffuseTex; // ... 
}; struct PrimitiveDevBufPointers { int primitiveMode; //from tinygltfloader macro PrimitiveType primitiveType; int numPrimitives; int numIndices; int numVertices; // Vertex In, const after loaded VertexIndex* dev_indices; VertexAttributePosition* dev_position; VertexAttributeNormal* dev_normal; VertexAttributeTexcoord* dev_texcoord0; // Materials, add more attributes when needed TextureData* dev_diffuseTex; int diffuseTexWidth; int diffuseTexHeight; // TextureData* dev_specularTex; // TextureData* dev_normalTex; // ... // Vertex Out, vertex used for rasterization, this is changing every frame VertexOut* dev_verticesOut; // TODO: add more attributes when needed }; } struct shouldCull { __host__ __device__ bool operator()(const Primitive& prim) { return (glm::dot(glm::normalize(prim.v[0].eyePos), prim.v[0].eyeNor) > 0.0f); } }; static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap; static int width = 0; static int height = 0; static int totalNumPrimitives = 0; static Primitive *dev_primitives = NULL; static Fragment *dev_fragmentBuffer = NULL; static glm::vec3 *dev_framebuffer = NULL; static int * dev_depth = NULL; // you might need this buffer when doing depth test static float * dev_depthValues = NULL; // stores depth values static int * dev_depthLocks = NULL; // locks Z-buffer #if CUDA_MEASURE float vertProcTimeAcc = 0.0f; float primAsmTimeAcc = 0.0f; #if BACK_FACE_CULLING float cullTimeAcc = 0.0f; int cullCountAcc = 0; #endif float rastTimeAcc = 0.0f; float fragShaderTimeAcc = 0.0f; float copyToPBOTimeAcc = 0.0f; int measureCount = 0; #define MEASURE_COUNT_MAX 1000 #endif // CUDA_MEASURE /** * Kernel that writes the image to the OpenGL PBO directly. */ __global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } /** * Writes fragment colors to the framebuffer */ __global__ void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 baseColor; glm::vec3 eyePos; glm::vec3 eyeNor; bool shouldShade = false; #if SSAA_FACTOR > 1 int aaIndex = SSAA_FACTOR * index; for (int dx = 0; dx < SSAA_FACTOR; dx++) { for (int dy = 0; dy < SSAA_FACTOR; dy++) { const Fragment &frag = fragmentBuffer[x * SSAA_FACTOR + dx + (y * SSAA_FACTOR + dy) * w * SSAA_FACTOR]; baseColor += frag.color; eyePos += frag.eyePos; eyeNor += frag.eyeNor; shouldShade |= frag.shouldShade; } } baseColor /= float(SSAA_FACTOR * SSAA_FACTOR); //eyePos /= float(SSAA_FACTOR * SSAA_FACTOR); //eyeNor /= float(SSAA_FACTOR * SSAA_FACTOR); eyeNor = glm::normalize(eyeNor); #else baseColor = fragmentBuffer[index].color; eyePos = fragmentBuffer[index].eyePos; eyeNor = fragmentBuffer[index].eyeNor; shouldShade = fragmentBuffer[index].shouldShade; #endif // TODO: add your fragment shader code here if (shouldShade) { float lambert = glm::clamp(glm::dot(glm::normalize(-eyePos), eyeNor), 0.0f, 1.0f); framebuffer[index] = baseColor * lambert; } 
else { framebuffer[index] = baseColor; } } } /** * Called once at the beginning of the program to allocate memory. */ void rasterizeInit(int w, int h) { width = w * SSAA_FACTOR; height = h * SSAA_FACTOR; printf("width: %d, height: %d\n", w, h); cudaFree(dev_fragmentBuffer); cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment)); cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); cudaFree(dev_framebuffer); cudaMalloc(&dev_framebuffer, w * h * sizeof(glm::vec3)); cudaMemset(dev_framebuffer, 0, w * h * sizeof(glm::vec3)); cudaFree(dev_depth); cudaMalloc(&dev_depth, width * height * sizeof(int)); cudaMalloc(&dev_depthValues, width * height * sizeof(float)); cudaMalloc(&dev_depthLocks, width * height * sizeof(int)); checkCUDAError("rasterizeInit"); } __global__ void initDepth(int w, int h, int * depth) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); depth[index] = INT_MAX; } } __global__ void initDepthValues(int w, int h, float * depthValues) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); depthValues[index] = 2.0f; } } __global__ void initDepthLocks(int w, int h, int * depthLocks) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); depthLocks[index] = 0; } } /** * kern function with support for stride to sometimes replace cudaMemcpy * One thread is responsible for copying one component */ __global__ void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) { // Attribute (vec3 position) // component (3 * float) // byte (4 * byte) // id of component int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { int count = i / n; int offset = i - count * n; // which component of the attribute for (int j = 0; j < componentTypeByteSize; j++) { dev_dst[count * componentTypeByteSize * n + offset * componentTypeByteSize + j] = dev_src[byteOffset + count * (byteStride == 0 ? 
componentTypeByteSize * n : byteStride) + offset * componentTypeByteSize + j]; } } } __global__ void _nodeMatrixTransform( int numVertices, VertexAttributePosition* position, VertexAttributeNormal* normal, glm::mat4 MV, glm::mat3 MV_normal) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f)); normal[vid] = glm::normalize(MV_normal * normal[vid]); } } glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) { glm::mat4 curMatrix(1.0); const std::vector<double> &m = n.matrix; if (m.size() > 0) { // matrix, copy it for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { curMatrix[i][j] = (float)m.at(4 * i + j); } } } else { // no matrix, use rotation, scale, translation if (n.translation.size() > 0) { curMatrix[3][0] = n.translation[0]; curMatrix[3][1] = n.translation[1]; curMatrix[3][2] = n.translation[2]; } if (n.rotation.size() > 0) { glm::mat4 R; glm::quat q; q[0] = n.rotation[0]; q[1] = n.rotation[1]; q[2] = n.rotation[2]; R = glm::mat4_cast(q); curMatrix = curMatrix * R; } if (n.scale.size() > 0) { curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2])); } } return curMatrix; } void traverseNode ( std::map<std::string, glm::mat4> & n2m, const tinygltf::Scene & scene, const std::string & nodeString, const glm::mat4 & parentMatrix ) { const tinygltf::Node & n = scene.nodes.at(nodeString); glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n); n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M)); auto it = n.children.begin(); auto itEnd = n.children.end(); for (; it != itEnd; ++it) { traverseNode(n2m, scene, *it, M); } } void rasterizeSetBuffers(const tinygltf::Scene & scene) { totalNumPrimitives = 0; std::map<std::string, BufferByte*> bufferViewDevPointers; // 1. copy all `bufferViews` to device memory { std::map<std::string, tinygltf::BufferView>::const_iterator it( scene.bufferViews.begin()); std::map<std::string, tinygltf::BufferView>::const_iterator itEnd( scene.bufferViews.end()); for (; it != itEnd; it++) { const std::string key = it->first; const tinygltf::BufferView &bufferView = it->second; if (bufferView.target == 0) { continue; // Unsupported bufferView. } const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer); BufferByte* dev_bufferView; cudaMalloc(&dev_bufferView, bufferView.byteLength); cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice); checkCUDAError("Set BufferView Device Mem"); bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView)); } } // 2. 
for each mesh: // for each primitive: // build device buffer of indices, materail, and each attributes // and store these pointers in a map { std::map<std::string, glm::mat4> nodeString2Matrix; auto rootNodeNamesList = scene.scenes.at(scene.defaultScene); { auto it = rootNodeNamesList.begin(); auto itEnd = rootNodeNamesList.end(); for (; it != itEnd; ++it) { traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f)); } } // parse through node to access mesh auto itNode = nodeString2Matrix.begin(); auto itEndNode = nodeString2Matrix.end(); for (; itNode != itEndNode; ++itNode) { const tinygltf::Node & N = scene.nodes.at(itNode->first); const glm::mat4 & matrix = itNode->second; const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix))); auto itMeshName = N.meshes.begin(); auto itEndMeshName = N.meshes.end(); for (; itMeshName != itEndMeshName; ++itMeshName) { const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName); auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>())); std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second; // for each primitive for (size_t i = 0; i < mesh.primitives.size(); i++) { const tinygltf::Primitive &primitive = mesh.primitives[i]; if (primitive.indices.empty()) return; // TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes VertexIndex* dev_indices = NULL; VertexAttributePosition* dev_position = NULL; VertexAttributeNormal* dev_normal = NULL; VertexAttributeTexcoord* dev_texcoord0 = NULL; // ----------Indices------------- const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices); const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView); BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView); // assume type is SCALAR for indices int n = 1; int numIndices = indexAccessor.count; int componentTypeByteSize = sizeof(VertexIndex); int byteLength = numIndices * n * componentTypeByteSize; dim3 numThreadsPerBlock(128); dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); cudaMalloc(&dev_indices, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( numIndices, (BufferByte*)dev_indices, dev_bufferView, n, indexAccessor.byteStride, indexAccessor.byteOffset, componentTypeByteSize); checkCUDAError("Set Index Buffer"); // ---------Primitive Info------- // Warning: LINE_STRIP is not supported in tinygltfloader int numPrimitives; PrimitiveType primitiveType; switch (primitive.mode) { case TINYGLTF_MODE_TRIANGLES: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices / 3; break; case TINYGLTF_MODE_TRIANGLE_STRIP: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_TRIANGLE_FAN: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_LINE: primitiveType = PrimitiveType::Line; numPrimitives = numIndices / 2; break; case TINYGLTF_MODE_LINE_LOOP: primitiveType = PrimitiveType::Line; numPrimitives = numIndices + 1; break; case TINYGLTF_MODE_POINTS: primitiveType = PrimitiveType::Point; numPrimitives = numIndices; break; default: // output error break; }; // ----------Attributes------------- auto it(primitive.attributes.begin()); auto itEnd(primitive.attributes.end()); int numVertices = 0; // for each attribute for (; it != itEnd; it++) { const tinygltf::Accessor &accessor = 
scene.accessors.at(it->second); const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView); int n = 1; if (accessor.type == TINYGLTF_TYPE_SCALAR) { n = 1; } else if (accessor.type == TINYGLTF_TYPE_VEC2) { n = 2; } else if (accessor.type == TINYGLTF_TYPE_VEC3) { n = 3; } else if (accessor.type == TINYGLTF_TYPE_VEC4) { n = 4; } BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView); BufferByte ** dev_attribute = NULL; numVertices = accessor.count; int componentTypeByteSize; // Note: since the type of our attribute array (dev_position) is static (float32) // We assume the glTF model attribute type are 5126(FLOAT) here if (it->first.compare("POSITION") == 0) { componentTypeByteSize = sizeof(VertexAttributePosition) / n; dev_attribute = (BufferByte**)&dev_position; } else if (it->first.compare("NORMAL") == 0) { componentTypeByteSize = sizeof(VertexAttributeNormal) / n; dev_attribute = (BufferByte**)&dev_normal; } else if (it->first.compare("TEXCOORD_0") == 0) { componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n; dev_attribute = (BufferByte**)&dev_texcoord0; } std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n'; dim3 numThreadsPerBlock(128); dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); int byteLength = numVertices * n * componentTypeByteSize; cudaMalloc(dev_attribute, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( n * numVertices, *dev_attribute, dev_bufferView, n, accessor.byteStride, accessor.byteOffset, componentTypeByteSize); std::string msg = "Set Attribute Buffer: " + it->first; checkCUDAError(msg.c_str()); } // malloc for VertexOut VertexOut* dev_vertexOut; cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut)); checkCUDAError("Malloc VertexOut Buffer"); // ----------Materials------------- // You can only worry about this part once you started to // implement textures for your rasterizer TextureData* dev_diffuseTex = NULL; int diffuseTexWidth = 0; int diffuseTexHeight = 0; if (!primitive.material.empty()) { const tinygltf::Material &mat = scene.materials.at(primitive.material); printf("material.name = %s\n", mat.name.c_str()); if (mat.values.find("diffuse") != mat.values.end()) { std::string diffuseTexName = mat.values.at("diffuse").string_value; if (scene.textures.find(diffuseTexName) != scene.textures.end()) { const tinygltf::Texture &tex = scene.textures.at(diffuseTexName); if (scene.images.find(tex.source) != scene.images.end()) { const tinygltf::Image &image = scene.images.at(tex.source); size_t s = image.image.size() * sizeof(TextureData); cudaMalloc(&dev_diffuseTex, s); cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice); diffuseTexWidth = image.width; diffuseTexHeight = image.height; checkCUDAError("Set Texture Image data"); } } } // TODO: write your code for other materails // You may have to take a look at tinygltfloader // You can also use the above code loading diffuse material as a start point } // ---------Node hierarchy transform-------- cudaDeviceSynchronize(); dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); _nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > ( numVertices, dev_position, dev_normal, matrix, matrixNormal); checkCUDAError("Node hierarchy transformation"); // at the end of the for loop of primitive // push dev pointers to map primitiveVector.push_back(PrimitiveDevBufPointers{ primitive.mode, primitiveType, 
numPrimitives, numIndices, numVertices, dev_indices, dev_position, dev_normal, dev_texcoord0, dev_diffuseTex, diffuseTexWidth, diffuseTexHeight, dev_vertexOut //VertexOut }); totalNumPrimitives += numPrimitives; } // for each primitive } // for each mesh } // for each node } // 3. Malloc for dev_primitives { cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive)); } // Finally, cudaFree raw dev_bufferViews { std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin()); std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end()); //bufferViewDevPointers for (; it != itEnd; it++) { cudaFree(it->second); } checkCUDAError("Free BufferView Device Mem"); } } __global__ void _vertexTransformAndAssembly( int numVertices, PrimitiveDevBufPointers primitive, glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal, int width, int height) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { // TODO: Apply vertex transformation here glm::vec4 pos = glm::vec4(primitive.dev_position[vid], 1.0f); glm::vec3 nor = primitive.dev_normal[vid]; glm::vec3 eyePos = glm::vec3(MV * pos); // Multiply the MVP matrix for each vertex position, this will transform everything into clipping space pos = MVP * pos;//multiplyMV4(glmMat4ToCudaMat4(MVP), pos); nor = glm::normalize(MV_normal * nor); // Then divide the pos by its w element to transform into NDC space pos /= pos.w; // Finally transform x and y to viewport space pos.x = (pos.x + 1.0) * 0.5f * width; pos.y = (1.0f - pos.y) * 0.5f * height; // TODO: Apply vertex assembly here // Assemble all attribute arraies into the primitive array primitive.dev_verticesOut[vid].pos = pos; primitive.dev_verticesOut[vid].eyeNor = nor; primitive.dev_verticesOut[vid].eyePos = eyePos; primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex; if (primitive.dev_texcoord0 != NULL) { primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth; primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight; primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid]; } } } static int curPrimitiveBeginId = 0; __global__ void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) { // index id int iid = (blockIdx.x * blockDim.x) + threadIdx.x; if (iid < numIndices) { // TODO: uncomment the following code for a start // This is primitive assembly for triangles int pid; // id for cur primitives vector if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) { pid = iid / (int)primitive.primitiveType; dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType] = primitive.dev_verticesOut[primitive.dev_indices[iid]]; } // TODO: other primitive types (point, line) } } __device__ float triArea(const glm::vec3& pt0, const glm::vec3& pt1, const glm::vec3& pt2) { // don't divide by 2 because all calls should use this and we aren't concerned // about the actual value of the area, just the relative values return glm::length(glm::cross(pt0 - pt1, pt0 - pt2)); } __device__ bool isInTriangle(glm::vec3* triPoints, const glm::vec3& pt, float totalArea, float* baryWeights) { baryWeights[2] = triArea(pt, triPoints[0], triPoints[1]); baryWeights[0] = triArea(pt, triPoints[1], triPoints[2]); baryWeights[1] = triArea(pt, triPoints[0], triPoints[2]); return (baryWeights[0] + baryWeights[1] + baryWeights[2]) <= totalArea; } #if BILINEAR_INTERP __device__ glm::vec3 colorFromUV(TextureData* texture, 
glm::vec2 texCoord, int texWidth, int texHeight) { glm::vec2 scaledTexCoord = texCoord * glm::vec2(texWidth, texHeight); glm::ivec2 intScaledTexCoord = glm::ivec2(scaledTexCoord); glm::ivec2 nextScaledTexCoord = glm::clamp(intScaledTexCoord + glm::ivec2(1), glm::ivec2(0), glm::ivec2(texWidth - 1, texHeight - 1)); int idx = intScaledTexCoord.x + intScaledTexCoord.y * texWidth; glm::vec3 col00 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); idx = nextScaledTexCoord.x + intScaledTexCoord.y * texWidth; glm::vec3 col10 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); idx = intScaledTexCoord.x + nextScaledTexCoord.y * texWidth; glm::vec3 col01 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); idx = nextScaledTexCoord.x + nextScaledTexCoord.y * texWidth; glm::vec3 col11 = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); glm::vec2 diff = scaledTexCoord - glm::vec2(intScaledTexCoord); return (1.0f - diff.x) * (1.0f - diff.y) * col00 + diff.x * (1.0f - diff.y) * col10 + (1.0f - diff.x) * diff.y * col01 + diff.x * diff.y * col11; } #else __device__ glm::vec3 colorFromUV(TextureData* texture, glm::vec2 texCoord, int texWidth, int texHeight) { int idx = (int)(texCoord.x * texWidth) + (int)(texCoord.y * texHeight) * texWidth; glm::vec3 col = glm::vec3(texture[idx * 3] / 255.0f, texture[idx * 3 + 1] / 255.0f, texture[idx * 3 + 2] / 255.0f); return col; } #endif __global__ void rast(Primitive* dev_primitives, int primitivesCount, int w, int h, Fragment *fragmentBuffer, float *dev_depthValues, int *dev_depthLocks) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (idx < primitivesCount) { Primitive& prim = dev_primitives[idx]; // assume triangle glm::vec2 bboxMin = glm::max(glm::vec2(0.0f), glm::min(glm::vec2(prim.v[0].pos), glm::min(glm::vec2(prim.v[1].pos), glm::vec2(prim.v[2].pos)))); glm::vec2 bboxMax = glm::min(glm::vec2((float)(w - 1), (float)(h - 1)), glm::max(glm::vec2(prim.v[0].pos), glm::max(glm::vec2(prim.v[1].pos), glm::vec2(prim.v[2].pos)))); bboxMin = glm::floor(bboxMin); bboxMax = glm::ceil(bboxMax); glm::vec3 triPoints[3]; triPoints[0] = glm::vec3(prim.v[0].pos); //triPoints[0].z = 0.0f; triPoints[1] = glm::vec3(prim.v[1].pos); //triPoints[1].z = 0.0f; triPoints[2] = glm::vec3(prim.v[2].pos); //triPoints[2].z = 0.0f; // make totalArea slightly larger to reduce "shadow acne" due to FP error float totalArea = triArea(triPoints[0], triPoints[1], triPoints[2]) * 1.0001f; float baryWeights[3]; glm::vec3 baryCoords; bool hasTexture = prim.v[0].dev_diffuseTex != NULL; #if RENDER_MODE == RENDER_FULL_TRIANGLE for (float y = bboxMin.y; y <= bboxMax.y; y += 1.0f) { for (float x = bboxMin.x; x <= bboxMax.x; x += 1.0f) { baryCoords = calculateBarycentricCoordinate(triPoints, glm::vec2(x, y)); if (isBarycentricCoordInBounds(baryCoords)) { // TODO: persp-correct baryWeights[0] = baryCoords[0]; baryWeights[1] = baryCoords[1]; baryWeights[2] = baryCoords[2]; #if PERSP_CORRECT float z = 1.0f / (baryWeights[0] / prim.v[0].eyePos.z + baryWeights[1] / prim.v[1].eyePos.z + baryWeights[2] / prim.v[2].eyePos.z); baryWeights[0] *= z / prim.v[0].eyePos.z; baryWeights[1] *= z / prim.v[1].eyePos.z; baryWeights[2] *= z / prim.v[2].eyePos.z; z = -getZAtCoordinate(baryCoords, triPoints); #else float z = -getZAtCoordinate(baryCoords, triPoints); #endif // depth check // lock this fragment on the depth buffer 
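// (With PERSP_CORRECT on, the weights above were rescaled to
					// b_i * z / z_i, where z = 1 / (b0/z0 + b1/z1 + b2/z2) is the
					// harmonically interpolated eye-space depth, so the attributes
					// below are interpolated perspective-correctly; the depth compare
					// itself still uses the screen-space z from getZAtCoordinate.)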
Fragment frag; glm::vec3 nor = baryWeights[0] * prim.v[0].eyeNor + baryWeights[1] * prim.v[1].eyeNor + baryWeights[2] * prim.v[2].eyeNor; // texture mapping if (hasTexture) { glm::vec2 texCoord = baryWeights[0] * prim.v[0].texcoord0 + baryWeights[1] * prim.v[1].texcoord0 + baryWeights[2] * prim.v[2].texcoord0; // check if UV are in range (may not be if Z value is weird) if (texCoord.x < 0.0f || texCoord.x > 1.0f || texCoord.y < 0.0f || texCoord.y > 1.0f) { continue; } frag.shouldShade = true; frag.color = colorFromUV(prim.v[0].dev_diffuseTex, texCoord, prim.v[0].texWidth, prim.v[0].texHeight); } else { // color using normal nor = glm::normalize(nor); // check if coords are in range (may not be if Z value is weird) if (nor.x < -1.0f || nor.x > 1.0f || nor.y < -1.0f || nor.y > 1.0f || nor.z < -1.0f || nor.z > 1.0f) { continue; } frag.shouldShade = false; frag.color = glm::abs(nor); } frag.eyeNor = nor; frag.eyePos = baryWeights[0] * prim.v[0].eyePos + baryWeights[1] * prim.v[1].eyePos + baryWeights[2] * prim.v[2].eyePos; frag.screenCoord = glm::ivec2((int)x, (int)y); frag.depth = z; //frag.color = glm::vec3(z); // add fragment int fragIdx = (int)x + (int)y * w; // magic to make mutex work bool isSet; do { isSet = (atomicCAS(&dev_depthLocks[fragIdx], 0, 1) == 0); if (isSet) { // critical section if (z < dev_depthValues[fragIdx]) { dev_depthValues[fragIdx] = z; fragmentBuffer[fragIdx] = frag; } // unlock fragment dev_depthLocks[fragIdx] = 0;//atomicExch(&dev_depthLocks[fragIdx], 0); } } while (!isSet); } } } #elif RENDER_MODE == RENDER_VERTICES for (int i = 0; i < 3; i++) { const glm::vec3& vert = triPoints[i]; int x = (int)vert.x; int y = (int)vert.y; if (x >= w || y >= h || x < 0 || y < 0) { continue; } Fragment frag; if (hasTexture) { frag.shouldShade = true; frag.color = colorFromUV(prim.v[0].dev_diffuseTex, prim.v[i].texcoord0, prim.v[i].texWidth, prim.v[i].texHeight); } else { frag.shouldShade = false; frag.color = glm::abs(prim.v[i].eyeNor); } frag.eyePos = prim.v[i].eyePos; frag.eyeNor = prim.v[i].eyeNor; float z = vert.z; int xLimit = glm::clamp(x + VERTEX_RENDER_SIZE, x, w - 1); int yLimit = glm::clamp(y + VERTEX_RENDER_SIZE, y, h - 1); for (int fragX = x; fragX <= xLimit; fragX++) { for (int fragY = y; fragY <= yLimit; fragY++) { int fragIdx = fragX + fragY * w; bool isSet; do { isSet = true;// (atomicCAS(&dev_depthLocks[fragIdx], 0, 1) == 0); if (isSet) { // critical section if (z < dev_depthValues[fragIdx]) { dev_depthValues[fragIdx] = z; fragmentBuffer[fragIdx] = frag; } // unlock fragment dev_depthLocks[fragIdx] = 0;//atomicExch(&dev_depthLocks[fragIdx], 0); } } while (!isSet); } } } #elif RENDER_MODE == RENDER_EDGES // Bresenham's line algorithm Fragment frag; frag.shouldShade = false; frag.color = glm::vec3(1.0f); for (int i = 0; i < 3; i++) { glm::ivec2 leftVert; glm::ivec2 rightVert; int nextIdx = i == 2 ? 
0 : i + 1; if ((int)triPoints[i].x < 0 || (int)triPoints[i].x >= w || (int)triPoints[nextIdx].x < 0 || (int)triPoints[nextIdx].x >= w || (int)triPoints[i].y < 0 || (int)triPoints[i].y >= h || (int)triPoints[nextIdx].y < 0 || (int)triPoints[nextIdx].y >= h) { continue; } if ((int)triPoints[i].x == (int)triPoints[nextIdx].x) { // vertical line int yStart = glm::max(0, (int)glm::min(triPoints[i].y, triPoints[nextIdx].y)); int yEnd = glm::min(h - 1, (int)glm::max(triPoints[i].y, triPoints[nextIdx].y)); int x = (int)triPoints[i].x; // draw(x,y) for (int y = yStart; y <= yEnd; y++) { int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; } continue; } else if ((int)triPoints[i].y == (int)triPoints[nextIdx].y) { // horizontal line int xStart = glm::max(0, (int)glm::min(triPoints[i].x, triPoints[nextIdx].x)); int xEnd = glm::min(w - 1, (int)glm::max(triPoints[i].x, triPoints[nextIdx].x)); int y = (int)triPoints[i].y; // draw(x,y) for (int x = xStart; x <= xEnd; x++) { int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; } continue; } else if ((int)triPoints[i].x > (int)triPoints[nextIdx].x) { rightVert = glm::ivec2(triPoints[i]); leftVert = glm::ivec2(triPoints[nextIdx]); } else { leftVert = glm::ivec2(triPoints[i]); rightVert = glm::ivec2(triPoints[nextIdx]); } float dErr = abs((triPoints[i].y - triPoints[nextIdx].y) / (triPoints[i].x - triPoints[nextIdx].x)); bool downward = ((rightVert.y - leftVert.y) < 0); int increment = downward ? -1 : 1; float accErr = 0.0f; int xStart = glm::max(leftVert.x, 0); int y = leftVert.y; int xEnd = glm::min(rightVert.x, w - 1); for (int x = xStart; x <= xEnd; x++) { // draw(x,y) if (y < 0 || y >= h) { break; } int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; accErr += dErr; while (accErr >= 0.5f) { y += increment; // draw(x, y) if (y < 0 || y >= h) { break; } if ((downward && y == rightVert.y - 1) || (!downward && y == rightVert.y + 1)) { y -= increment; } int fragIdx = x + y * w; fragmentBuffer[fragIdx] = frag; accErr -= 1.0f; } } } #endif // RENDER_MODE } } /** * Perform rasterization. */ void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockCount2d((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); // for frame buffer; will be different is SSAA_FACTOR > 1 dim3 frameBufferBlockCount2d((width / SSAA_FACTOR - 1) / blockSize2d.x + 1, (height / SSAA_FACTOR - 1) / blockSize2d.y + 1); // set up CUDA timing events cudaEvent_t stageStart, stageEnd; cudaEventCreate(&stageStart); cudaEventCreate(&stageEnd); float vertProcTime = 0.0f; float primAsmTime = 0.0f; #if BACK_FACE_CULLING float cullTime; #endif float rastTime; float fragShaderTime; float copyToPBOTime; // Execute your rasterization pipeline here // (See README for rasterization pipeline outline.) 
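// Note: width/height are the supersampled dimensions (rasterizeInit
	// scales the window size by SSAA_FACTOR); when SSAA_FACTOR > 1,
	// render() averages SSAA_FACTOR * SSAA_FACTOR fragments down to each
	// framebuffer pixel.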
// Vertex Process & primitive assembly { curPrimitiveBeginId = 0; dim3 numThreadsPerBlock(128); auto it = mesh2PrimitivesMap.begin(); auto itEnd = mesh2PrimitivesMap.end(); for (; it != itEnd; ++it) { auto p = (it->second).begin(); // each primitive auto pEnd = (it->second).end(); for (; p != pEnd; ++p) { float measurement; dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); checkCUDAError("pre-Vertex Processing"); cudaEventRecord(stageStart); _vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height); cudaEventRecord(stageEnd); checkCUDAError("post-Vertex Processing"); cudaDeviceSynchronize(); cudaEventSynchronize(stageEnd); cudaEventElapsedTime(&measurement, stageStart, stageEnd); vertProcTime += measurement; cudaEventRecord(stageStart); _primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> > (p->numIndices, curPrimitiveBeginId, dev_primitives, *p); cudaEventRecord(stageEnd); checkCUDAError("Primitive Assembly"); cudaEventSynchronize(stageEnd); cudaEventElapsedTime(&measurement, stageStart, stageEnd); primAsmTime += measurement; curPrimitiveBeginId += p->numPrimitives; } } checkCUDAError("Vertex Processing and Primitive Assembly"); } cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); initDepthValues << <blockCount2d, blockSize2d >> >(width, height, dev_depthValues); cudaMemset(dev_depthLocks, 0, width * height * sizeof(int)); // test screen-space tri #if 0 Primitive prim; prim.v[0].pos = glm::vec4(100.0f, 100.0f, 0.0f, 1.0f); prim.v[1].pos = glm::vec4(800.0f, 100.0f, 0.0f, 1.0f); prim.v[2].pos = glm::vec4(200.0f, 800.0f, 0.0f, 1.0f); cudaMemcpy(dev_primitives, &prim, sizeof(Primitive), cudaMemcpyHostToDevice); curPrimitiveBeginId = 1; #endif // back-face culling #if BACK_FACE_CULLING cudaEventRecord(stageStart); Primitive *newEnd = thrust::remove_if(thrust::device, dev_primitives, dev_primitives + curPrimitiveBeginId, shouldCull()); #if CUDA_MEASURE cullCountAcc += (dev_primitives + curPrimitiveBeginId) - newEnd; #endif //printf("culled: %d", (dev_primitives + curPrimitiveBeginId) - newEnd); curPrimitiveBeginId = newEnd - dev_primitives; cudaEventRecord(stageEnd); cudaEventSynchronize(stageEnd); cudaEventElapsedTime(&cullTime, stageStart, stageEnd); #endif // TODO: rasterize checkCUDAError("pre-actual rasterizer"); cudaEventRecord(stageStart); rast << <dim3(curPrimitiveBeginId / 32 + 1), dim3(32) >> > (dev_primitives, curPrimitiveBeginId, width, height, dev_fragmentBuffer, dev_depthValues, dev_depthLocks); cudaEventRecord(stageEnd); cudaEventSynchronize(stageEnd); cudaEventElapsedTime(&rastTime, stageStart, stageEnd); checkCUDAError("post-actual rasterizer"); #if 0 float *buf = (float *)malloc(width * height * sizeof(float)); cudaMemcpy(buf, dev_depthValues, width * height * sizeof(float), cudaMemcpyDeviceToHost); int ct = 0; for (int i = 0; i < width * height; i++) { if (buf[i] <= 0.0f) { printf("%.3f ", buf[i]); ct++; } } printf("ct: %d", ct); printf("\n"); free(buf); while (1); #endif // Copy depthbuffer colors into framebuffer cudaEventRecord(stageStart); render << <frameBufferBlockCount2d, blockSize2d >> >(width / SSAA_FACTOR, height / SSAA_FACTOR, dev_fragmentBuffer, dev_framebuffer); cudaEventRecord(stageEnd); cudaEventSynchronize(stageEnd); cudaEventElapsedTime(&fragShaderTime, stageStart, stageEnd); checkCUDAError("fragment shader"); 
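// (When CUDA_MEASURE == 0, the cudaEvent* calls in this function are
	// #define'd to no-ops at the top of the file, so the timing
	// instrumentation adds no overhead in normal builds.)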
    cudaEventRecord(stageStart);
    // Copy framebuffer into OpenGL buffer for OpenGL previewing
    sendImageToPBO<<<frameBufferBlockCount2d, blockSize2d>>>(pbo, width / SSAA_FACTOR, height / SSAA_FACTOR, dev_framebuffer);
    cudaEventRecord(stageEnd);
    cudaEventSynchronize(stageEnd);
    cudaEventElapsedTime(&copyToPBOTime, stageStart, stageEnd);
    checkCUDAError("copy render result to pbo");

#if CUDA_MEASURE
    measureCount++;
    vertProcTimeAcc += vertProcTime;
    primAsmTimeAcc += primAsmTime;
#if BACK_FACE_CULLING
    cullTimeAcc += cullTime;
#endif
    rastTimeAcc += rastTime;
    fragShaderTimeAcc += fragShaderTime;
    copyToPBOTimeAcc += copyToPBOTime;

    if (measureCount >= MEASURE_COUNT_MAX) {
        // print measurements
        printf("Vertex Processing: %.4f\n", vertProcTimeAcc / (float)measureCount);
        printf("Primitive Assembly: %.4f\n", primAsmTimeAcc / (float)measureCount);
#if BACK_FACE_CULLING
        printf("Back-face Culling: %.4f\n", cullTimeAcc / (float)measureCount);
        printf("Faces Culled: %.4f\n", (float)(cullCountAcc) / (float)measureCount);
#endif
        printf("Rasterizer: %.4f\n", rastTimeAcc / (float)measureCount);
        printf("Fragment Shader: %.4f\n", fragShaderTimeAcc / (float)measureCount);
        printf("Copy to PBO: %.4f\n", copyToPBOTimeAcc / (float)measureCount);
        printf("\n");

        measureCount = 0;
        vertProcTimeAcc = 0.0f;
        primAsmTimeAcc = 0.0f;
#if BACK_FACE_CULLING
        cullTimeAcc = 0.0f;
        cullCountAcc = 0;
#endif
        rastTimeAcc = 0.0f;
        fragShaderTimeAcc = 0.0f;
        copyToPBOTimeAcc = 0.0f;
    }
#endif
}

/**
 * Called once at the end of the program to free CUDA memory.
 */
void rasterizeFree() {

    // deconstruct primitives attribute/indices device buffer
    auto it(mesh2PrimitivesMap.begin());
    auto itEnd(mesh2PrimitivesMap.end());
    for (; it != itEnd; ++it) {
        for (auto p = it->second.begin(); p != it->second.end(); ++p) {
            cudaFree(p->dev_indices);
            cudaFree(p->dev_position);
            cudaFree(p->dev_normal);
            cudaFree(p->dev_texcoord0);
            cudaFree(p->dev_diffuseTex);
            cudaFree(p->dev_verticesOut);
            //TODO: release other attributes and materials
        }
    }

    ////////////

    cudaFree(dev_primitives);
    dev_primitives = NULL;

    cudaFree(dev_fragmentBuffer);
    dev_fragmentBuffer = NULL;

    cudaFree(dev_framebuffer);
    dev_framebuffer = NULL;

    cudaFree(dev_depth);
    dev_depth = NULL;

    cudaFree(dev_depthValues);
    dev_depthValues = NULL;

    cudaFree(dev_depthLocks);
    dev_depthLocks = NULL;

    checkCUDAError("rasterize Free");
}
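Every pipeline stage in rasterize() above is bracketed by cudaEventRecord() pairs and read back with cudaEventElapsedTime(). A minimal self-contained sketch of that timing pattern follows; dummyKernel and the launch shape are illustrative stand-ins, not part of the rasterizer:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel() {}    // stand-in for one pipeline stage

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                  // enqueue start marker on the stream
    dummyKernel<<<64, 128>>>();              // the work being timed
    cudaEventRecord(stop);                   // enqueue stop marker
    cudaEventSynchronize(stop);              // block host until stop is reached

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed time in milliseconds
    printf("stage took %.4f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}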
80aa013fb115f80807880a345dd3b1dbe0d71d74.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <ATen/Context.h> #include <c10/hip/HIPFunctions.h> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include "hipcub/hipcub.hpp" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA will handle the right function to call for us void CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. 
// TODO: change it to HIPGuardMasqueradingAsCUDA CUDAContext context(-1); // take current device CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is guarded by the CUDAContext::mutex. static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. 
if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } C10_LOG_API_USAGE_ONCE("caffe2.init.cuda"); // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { HIPGuardMasqueradingAsCUDA g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = ::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for hipDeviceEnablePeerAccess that should always be // zero currently. // It is ok if peer access is already enabled... hipError_t err = hipDeviceEnablePeerAccess(j, 0); if ((err != hipErrorPeerAccessAlreadyEnabled) && (err != hipSuccess)) { CAFFE_THROW(hipGetErrorString(err)); } hipGetLastError(); // reset cuda error code } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new hipcub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; // Initialize caching allocator at::globalContext().lazyInitCUDA(); } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } /** * An allocator that does the CPU memory allocation with pinned memory. * * This is needed because if we want to do any asynchronous cuda memcpy, * the underlying CPU memory also needs to be allocated into pinned memory * space. As a result, whenever Caffe2 is built with GPU and there is * GPU present during runtime, at global initialization time we will set * the CPU memory allocator to allocate pinned memory. * * NB: This behavior is probably too aggressive. We should consider asking users * to do on-demand memory pinning (like exposed in PyTorch APIs) instead. 
*/ struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator { PinnedCPUAllocator() { baseAllocator_ = GetDefaultCPUAllocator(); } ~PinnedCPUAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { if (nbytes == 0) { // replicate c10::alloc_cpu behavior - return nullptr return {nullptr, nullptr, &Delete, at::Device(CPU)}; } void* data; at::DataPtr data_ptr; std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter(); data_ptr = baseAllocator_->allocate(nbytes); data = data_ptr.get(); CAFFE_ENFORCE(data); CUDA_ENFORCE(hipHostRegister(data, nbytes, hipHostRegisterDefault)); CAFFE_ENFORCE( data_ptr.compare_exchange_deleter(expected_deleter, &Delete), "Failed to swap deleter (already swapped?)"); } else { CUDA_ENFORCE(hipHostMalloc(&data, nbytes)); profiledCPUMemoryReporter().New(data, nbytes); data_ptr = {data, data, &Delete, at::Device(CPU)}; } memset(data, 0, nbytes); return data_ptr; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* data) { if (!data) { return; } // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs // or not. If a CUDAContext::New() call is made, inside the CUDAContext // function we will switch the cpu side allocator to a PinnedCPUAllocator. // But, if one calls CPUContext::New() before any cuda allocations, // PinnedCPUAllocator can still delete the corresponding memory. std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { CUDA_ENFORCE(hipHostUnregister(data)); GetDefaultCPUAllocator()->raw_deleter()(data); } else { hipError_t err = hipHostFree(data); profiledCPUMemoryReporter().Delete(data); if (err == hipErrorInvalidValue) { free(data); // Calling hipGetLastError will reset the cuda error. hipError_t _err = hipGetLastError(); } else { // For all other errors, still do a cuda check. CUDA_ENFORCE(err); } } } at::Allocator* baseAllocator_; }; static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if C10_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "hipHostMalloc. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; // If CUDA is enabled, using CPU allocators other than PinnedCPUAllocator // will cause memory corruptions. Therefore, we need to set the priority // to highest to avoid being overwritten. SetCPUAllocator( &g_pinned_cpu_alloc, std::numeric_limits<uint8_t>::max() /* priority */); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. 
static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } CUDAContext::~CUDAContext() { try { if (curand_generator_) { CURAND_CHECK(hiprandDestroyGenerator(curand_generator_)); } // CUDAContext is used in 2 cases now: // - long-lived instance inside OperatorBase in which case what happens in // destructor doesn't really matter // - short-lived on-the-fly instances that are utilized as HIPGuardMasqueradingAsCUDA - in // this case there's only one stream id (passed to SwitchToDevice) and // it's preferrable to synchronize in the destructor FinishDeviceComputation(); } catch (const std::exception& e) { LOG(ERROR) << "Encountered following in " << __FUNCTION__ << ": " << e.what(); } } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = ::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: if (nbytes != 0) { CUDA_ENFORCE(hipMalloc(&ptr, nbytes)); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: if (nbytes != 0) { CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); } g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. We will need to find some // way of resolving this problem. hip::HIPStreamGuardMasqueradingAsCUDA g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = hip::HIPCachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple hipFree. hipError_t error = hipFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != hipSuccess && error != hipErrorDeinitialized) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { hip::HIPCachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
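SetUpCub() above hands the caffe2_cub_* flags to hipcub::CachingDeviceAllocator (cub::CachingDeviceAllocator in the CUDA twin below). A minimal sketch of driving that allocator directly, using the flag defaults from this file; the 1 MB request is illustrative:

#include <cub/util_allocator.cuh>

int main() {
    // Same knobs as the caffe2_cub_* flag defaults above:
    // bin growth 8, bins 3..10, cap on total cached bytes.
    cub::CachingDeviceAllocator allocator(
        /*bin_growth=*/8,
        /*min_bin=*/3,
        /*max_bin=*/10,
        /*max_cached_bytes=*/size_t(10 * 1024) * 1024L * 1024L);

    void *d_ptr = nullptr;
    allocator.DeviceAllocate(&d_ptr, 1 << 20);  // 1 MB served from the pool
    // ... launch kernels that use d_ptr ...
    allocator.DeviceFree(d_ptr);                // returned to the pool, not cudaFree'd
    return 0;
}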
80aa013fb115f80807880a345dd3b1dbe0d71d74.cu
#include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <ATen/Context.h> #include <c10/cuda/CUDAFunctions.h> #include <c10/cuda/CUDACachingAllocator.h> #include "cub/util_allocator.cuh" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA will handle the right function to call for us void CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. 
// TODO: change it to CUDAGuard CUDAContext context(-1); // take current device CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is guarded by the CUDAContext::mutex. static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. 
if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } C10_LOG_API_USAGE_ONCE("caffe2.init.cuda"); // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { CUDAGuard g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = std::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for cudaDeviceEnablePeerAccess that should always be // zero currently. // It is ok if peer access is already enabled... cudaError_t err = cudaDeviceEnablePeerAccess(j, 0); if ((err != cudaErrorPeerAccessAlreadyEnabled) && (err != cudaSuccess)) { CAFFE_THROW(cudaGetErrorString(err)); } cudaGetLastError(); // reset cuda error code } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new cub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; // Initialize caching allocator at::globalContext().lazyInitCUDA(); } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } /** * An allocator that does the CPU memory allocation with pinned memory. * * This is needed because if we want to do any asynchronous cuda memcpy, * the underlying CPU memory also needs to be allocated into pinned memory * space. As a result, whenever Caffe2 is built with GPU and there is * GPU present during runtime, at global initialization time we will set * the CPU memory allocator to allocate pinned memory. * * NB: This behavior is probably too aggressive. We should consider asking users * to do on-demand memory pinning (like exposed in PyTorch APIs) instead. 
*/ struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator { PinnedCPUAllocator() { baseAllocator_ = GetDefaultCPUAllocator(); } ~PinnedCPUAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { if (nbytes == 0) { // replicate c10::alloc_cpu behavior - return nullptr return {nullptr, nullptr, &Delete, at::Device(CPU)}; } void* data; at::DataPtr data_ptr; std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter(); data_ptr = baseAllocator_->allocate(nbytes); data = data_ptr.get(); CAFFE_ENFORCE(data); CUDA_ENFORCE(cudaHostRegister(data, nbytes, cudaHostRegisterDefault)); CAFFE_ENFORCE( data_ptr.compare_exchange_deleter(expected_deleter, &Delete), "Failed to swap deleter (already swapped?)"); } else { CUDA_ENFORCE(cudaMallocHost(&data, nbytes)); profiledCPUMemoryReporter().New(data, nbytes); data_ptr = {data, data, &Delete, at::Device(CPU)}; } memset(data, 0, nbytes); return data_ptr; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* data) { if (!data) { return; } // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs // or not. If a CUDAContext::New() call is made, inside the CUDAContext // function we will switch the cpu side allocator to a PinnedCPUAllocator. // But, if one calls CPUContext::New() before any cuda allocations, // PinnedCPUAllocator can still delete the corresponding memory. std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { CUDA_ENFORCE(cudaHostUnregister(data)); GetDefaultCPUAllocator()->raw_deleter()(data); } else { cudaError_t err = cudaFreeHost(data); profiledCPUMemoryReporter().Delete(data); if (err == cudaErrorInvalidValue) { free(data); // Calling cudaGetLastError will reset the cuda error. cudaError_t _err = cudaGetLastError(); } else { // For all other errors, still do a cuda check. CUDA_ENFORCE(err); } } } at::Allocator* baseAllocator_; }; static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if C10_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "cudaMallocHost. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; // If CUDA is enabled, using CPU allocators other than PinnedCPUAllocator // will cause memory corruptions. Therefore, we need to set the priority // to highest to avoid being overwritten. SetCPUAllocator( &g_pinned_cpu_alloc, std::numeric_limits<uint8_t>::max() /* priority */); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. 
static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } CUDAContext::~CUDAContext() { try { if (curand_generator_) { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); } // CUDAContext is used in 2 cases now: // - long-lived instance inside OperatorBase in which case what happens in // destructor doesn't really matter // - short-lived on-the-fly instances that are utilized as CUDAGuard - in // this case there's only one stream id (passed to SwitchToDevice) and // it's preferrable to synchronize in the destructor FinishDeviceComputation(); } catch (const std::exception& e) { LOG(ERROR) << "Encountered following in " << __FUNCTION__ << ": " << e.what(); } } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = std::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: if (nbytes != 0) { CUDA_ENFORCE(cudaMalloc(&ptr, nbytes)); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: if (nbytes != 0) { CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); } g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. We will need to find some // way of resolving this problem. cuda::CUDAStreamGuard g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = cuda::CUDACachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple cudaFree. cudaError_t error = cudaFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != cudaSuccess && error != cudaErrorCudartUnloading) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { cuda::CUDACachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
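PinnedCPUAllocator above exists because cudaMemcpyAsync only overlaps with other GPU work when the host buffer is page-locked. A minimal sketch of that allocate/copy pattern (buffer size and stream usage are illustrative):

#include <cuda_runtime.h>

int main() {
    const size_t nbytes = size_t(1) << 24;       // illustrative 16 MB staging buffer
    float *h_pinned = nullptr, *d_buf = nullptr;

    cudaMallocHost((void **)&h_pinned, nbytes);  // page-locked host allocation
    cudaMalloc((void **)&d_buf, nbytes);

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // Truly asynchronous only because the host buffer is pinned;
    // with pageable memory the runtime falls back to a synchronous copy.
    cudaMemcpyAsync(d_buf, h_pinned, nbytes, cudaMemcpyHostToDevice, stream);
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    cudaFree(d_buf);
    cudaFreeHost(h_pinned);
    return 0;
}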
6bcc39a679d96e885aff4894a7208b0e01b295e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Program to compute swaption portfolio using NVIDIA CUDA */ #include <stdio.h> #include <cutil.h> // parameters for nVidia device execution #define BLOCK_SIZE 64 #define GRID_SIZE 64 // parameters for LIBOR calculation #define NN 80 #define NMAT 40 #define L2_SIZE 3280 //NN*(NMAT+1) #define NOPT 15 #define NPATH 4096 // constant data for swaption portfolio: stored in device memory, // initialised by host and read by device threads __constant__ int N, Nmat, Nopt, maturities[NOPT]; __constant__ float delta, swaprates[NOPT], lambda[NN]; /* Monte Carlo LIBOR path calculation */ __device__ void path_calc(float *L, float *z) { int i, n; float sqez, lam, con1, v, vrat; for(n=0; n<Nmat; n++) { sqez = sqrtf(delta)*z[n]; v = 0.0; for (i=n+1; i<N; i++) { lam = lambda[i-n-1]; con1 = delta*lam; v += __fdividef(con1*L[i],1.0+delta*L[i]); vrat = __expf(con1*v + lam*(sqez-0.5*con1)); L[i] = L[i]*vrat; } } } /* forward path calculation storing data for subsequent reverse path calculation */ __device__ void path_calc_b1(float *L, float *z, float *L2) { int i, n; float sqez, lam, con1, v, vrat; for (i=0; i<N; i++) L2[i] = L[i]; for(n=0; n<Nmat; n++) { sqez = sqrt(delta)*z[n]; v = 0.0; for (i=n+1; i<N; i++) { lam = lambda[i-n-1]; con1 = delta*lam; v += __fdividef(con1*L[i],1.0+delta*L[i]); vrat = __expf(con1*v + lam*(sqez-0.5*con1)); L[i] = L[i]*vrat; // store these values for reverse path // L2[i+(n+1)*N] = L[i]; } } } /* reverse path calculation of deltas using stored data */ __device__ void path_calc_b2(float *L_b, float *z, float *L2) { int i, n; float faci, v1; for (n=Nmat-1; n>=0; n--) { v1 = 0.0; for (i=N-1; i>n; i--) { v1 += lambda[i-n-1]*L2[i+(n+1)*N]*L_b[i]; faci = __fdividef(delta,1.0+delta*L2[i+n*N]); L_b[i] = L_b[i]*__fdividef(L2[i+(n+1)*N],L2[i+n*N]) + v1*lambda[i-n-1]*faci*faci; } } } /* calculate the portfolio value v, and its sensitivity to L */ /* hand-coded reverse mode sensitivity */ __device__ float portfolio_b(float *L, float *L_b) { int m, n; float b, s, swapval,v; float B[NMAT], S[NMAT], B_b[NMAT], S_b[NMAT]; b = 1.0; s = 0.0; for (m=0; m<N-Nmat; m++) { n = m + Nmat; b = __fdividef(b,1.0+delta*L[n]); s = s + delta*b; B[m] = b; S[m] = s; } v = 0.0; for (m=0; m<N-Nmat; m++) { B_b[m] = 0; S_b[m] = 0; } for (n=0; n<Nopt; n++){ m = maturities[n] - 1; swapval = B[m] + swaprates[n]*S[m] - 1.0; if (swapval<0) { v += -100*swapval; S_b[m] += -100*swaprates[n]; B_b[m] += -100; } } for (m=N-Nmat-1; m>=0; m--) { n = m + Nmat; B_b[m] += delta*S_b[m]; L_b[n] = -B_b[m]*B[m]*__fdividef(delta,1.0+delta*L[n]); if (m>0) { S_b[m-1] += S_b[m]; B_b[m-1] += __fdividef(B_b[m],1.+delta*L[n]); } } // apply discount // b = 1.0; for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]); v = b*v; for (n=0; n<Nmat; n++){ L_b[n] = -v*delta/(1.0+delta*L[n]); } for (n=Nmat; n<N; n++){ L_b[n] = b*L_b[n]; } return v; } /* calculate the portfolio value v */ __device__ float portfolio(float *L) { int n, m, i; float v, b, s, swapval, B[40], S[40]; b = 1.0; s = 0.0; for(n=Nmat; n<N; n++) { b = b/(1.0+delta*L[n]); s = s + delta*b; B[n-Nmat] = b; S[n-Nmat] = s; } v = 0.0; for(i=0; i<Nopt; i++){ m = maturities[i] -1; swapval = B[m] + swaprates[i]*S[m] - 1.0; if(swapval<0) v += -100.0*swapval; } // apply discount // b = 1.0; for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]); v = b*v; return v; } __global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int threadN = blockDim.x * 
gridDim.x; int i,path; float L[NN], L2[L2_SIZE], z[NN]; float *L_b = L; /* Monte Carlo LIBOR path calculation*/ for(path = tid; path < NPATH; path += threadN){ // initialise the data for current thread for (i=0; i<N; i++) { // for real application, z should be randomly generated z[i] = 0.3; L[i] = 0.05; } path_calc_b1(L, z, L2); d_v[path] = portfolio_b(L,L_b); path_calc_b2(L_b, z, L2); d_Lb[path] = L_b[NN-1]; } } __global__ void Pathcalc_Portfolio_KernelGPU2(float *d_v) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int threadN = blockDim.x * gridDim.x; int i, path; float L[NN], z[NN]; /* Monte Carlo LIBOR path calculation*/ for(path = tid; path < NPATH; path += threadN){ // initialise the data for current thread for (i=0; i<N; i++) { // for real application, z should be randomly generated z[i] = 0.3; L[i] = 0.05; } path_calc(L, z); d_v[path] = portfolio(L); } } //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space float *h_v, *h_Lb, h_lambda[NN], h_delta=0.25; int h_N=NN, h_Nmat=NMAT, h_Nopt=NOPT, i; int h_maturities[] = {4,4,4,8,8,8,20,20,20,28,28,28,40,40,40}; float h_swaprates[] = {.045,.05,.055,.045,.05,.055,.045,.05, .055,.045,.05,.055,.045,.05,.055 }; double v, Lb; unsigned int hTimer; double gpuTime; // 'd_' prefix - GPU (device) memory space float *d_v,*d_Lb; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(hipSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); for (i=0; i<NN; i++) h_lambda[i] = 0.2; // Copy all constants into constant memory hipMemcpyToSymbol(N, &h_N, sizeof(h_N)); hipMemcpyToSymbol(Nmat, &h_Nmat, sizeof(h_Nmat)); hipMemcpyToSymbol(Nopt, &h_Nopt, sizeof(h_Nopt)); hipMemcpyToSymbol(delta, &h_delta, sizeof(h_delta)); hipMemcpyToSymbol(maturities, &h_maturities, sizeof(h_maturities)); hipMemcpyToSymbol(swaprates, &h_swaprates, sizeof(h_swaprates)); hipMemcpyToSymbol(lambda, &h_lambda, sizeof(h_lambda)); // Allocate memory on host and device h_v = (float *)malloc(sizeof(float)*NPATH); CUDA_SAFE_CALL( hipMalloc((void **)&d_v, sizeof(float)*NPATH) ); h_Lb = (float *)malloc(sizeof(float)*NPATH); CUDA_SAFE_CALL( hipMalloc((void **)&d_Lb, sizeof(float)*NPATH) ); // Execute GPU kernel -- no Greeks CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); // Set up the execution configuration dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(GRID_SIZE); // Launch the device computation threads hipLaunchKernelGGL(( Pathcalc_Portfolio_KernelGPU2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_v); CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU2() execution failed\n"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Read back GPU results and compute average CUDA_SAFE_CALL( hipMemcpy(h_v, d_v, sizeof(float)*NPATH, hipMemcpyDeviceToHost) ); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); v = 0.0; for (i=0; i<NPATH; i++) v += h_v[i]; v = v / NPATH; printf("v = %15.8f\n", 
v); printf("Time(No Greeks) : %f msec\n", gpuTime); // Execute GPU kernel -- Greeks CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); // Launch the device computation threads hipLaunchKernelGGL(( Pathcalc_Portfolio_KernelGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_v,d_Lb); CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU() execution failed\n"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Read back GPU results and compute average CUDA_SAFE_CALL( hipMemcpy(h_v, d_v, sizeof(float)*NPATH, hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(h_Lb, d_Lb, sizeof(float)*NPATH, hipMemcpyDeviceToHost) ); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); v = 0.0; for (i=0; i<NPATH; i++) v += h_v[i]; v = v / NPATH; Lb = 0.0; for (i=0; i<NPATH; i++) Lb += h_Lb[i]; Lb = Lb / NPATH; printf("v = %15.8f\n", v); printf("Lb = %15.8f\n", Lb); printf("Time (Greeks) : %f msec\n", gpuTime); // Release GPU memory CUDA_SAFE_CALL( hipFree(d_v)); CUDA_SAFE_CALL( hipFree(d_Lb)); // Release CPU memory free(h_v); free(h_Lb); CUT_SAFE_CALL( cutDeleteTimer(hTimer) ); //CUT_EXIT(argc, argv); }
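Both Pathcalc_Portfolio kernels above walk the NPATH Monte Carlo paths with a grid-stride loop (path += threadN), so a fixed BLOCK_SIZE x GRID_SIZE launch covers any path count. A minimal sketch of the same loop shape on an illustrative saxpy kernel:

#include <cuda_runtime.h>

// Illustrative saxpy in the same loop shape as the kernels above.
__global__ void gridStrideSaxpy(int n, float a, const float *x, float *y) {
    const int stride = blockDim.x * gridDim.x;           // total threads in the launch
    for (int i = blockDim.x * blockIdx.x + threadIdx.x;  // this thread's first element
         i < n; i += stride) {                           // hop by the launch width
        y[i] = a * x[i] + y[i];
    }
}

int main() {
    const int n = 1 << 20;
    float *x, *y;
    cudaMalloc((void **)&x, n * sizeof(float));
    cudaMalloc((void **)&y, n * sizeof(float));
    gridStrideSaxpy<<<64, 64>>>(n, 2.0f, x, y);  // fixed launch covers all n elements
    cudaDeviceSynchronize();
    cudaFree(x);
    cudaFree(y);
    return 0;
}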
6bcc39a679d96e885aff4894a7208b0e01b295e5.cu
/* Program to compute swaption portfolio using NVIDIA CUDA */

#include <stdio.h>
#include <cutil.h>

// parameters for nVidia device execution
#define BLOCK_SIZE 64
#define GRID_SIZE  64

// parameters for LIBOR calculation
#define NN      80
#define NMAT    40
#define L2_SIZE 3280   // NN*(NMAT+1)
#define NOPT    15
#define NPATH   4096

// constant data for swaption portfolio: stored in device memory,
// initialised by host and read by device threads
__constant__ int   N, Nmat, Nopt, maturities[NOPT];
__constant__ float delta, swaprates[NOPT], lambda[NN];

/* Monte Carlo LIBOR path calculation */

__device__ void path_calc(float *L, float *z)
{
  int   i, n;
  float sqez, lam, con1, v, vrat;

  for (n=0; n<Nmat; n++) {
    sqez = sqrtf(delta)*z[n];
    v = 0.0;
    for (i=n+1; i<N; i++) {
      lam  = lambda[i-n-1];
      con1 = delta*lam;
      v   += __fdividef(con1*L[i], 1.0+delta*L[i]);
      vrat = __expf(con1*v + lam*(sqez-0.5*con1));
      L[i] = L[i]*vrat;
    }
  }
}

/* forward path calculation storing data
   for subsequent reverse path calculation */

__device__ void path_calc_b1(float *L, float *z, float *L2)
{
  int   i, n;
  float sqez, lam, con1, v, vrat;

  for (i=0; i<N; i++) L2[i] = L[i];

  for (n=0; n<Nmat; n++) {
    sqez = sqrtf(delta)*z[n];
    v = 0.0;
    for (i=n+1; i<N; i++) {
      lam  = lambda[i-n-1];
      con1 = delta*lam;
      v   += __fdividef(con1*L[i], 1.0+delta*L[i]);
      vrat = __expf(con1*v + lam*(sqez-0.5*con1));
      L[i] = L[i]*vrat;
      // store these values for reverse path calculation;
      // path_calc_b2 below reads L2[i+(n+1)*N], so this store must be live
      L2[i+(n+1)*N] = L[i];
    }
  }
}

/* reverse path calculation of deltas using stored data */

__device__ void path_calc_b2(float *L_b, float *z, float *L2)
{
  int   i, n;
  float faci, v1;

  for (n=Nmat-1; n>=0; n--) {
    v1 = 0.0;
    for (i=N-1; i>n; i--) {
      v1  += lambda[i-n-1]*L2[i+(n+1)*N]*L_b[i];
      faci = __fdividef(delta, 1.0+delta*L2[i+n*N]);
      L_b[i] = L_b[i]*__fdividef(L2[i+(n+1)*N], L2[i+n*N])
             + v1*lambda[i-n-1]*faci*faci;
    }
  }
}

/* calculate the portfolio value v, and its sensitivity to L */
/* hand-coded reverse mode sensitivity */

__device__ float portfolio_b(float *L, float *L_b)
{
  int   m, n;
  float b, s, swapval, v;
  float B[NMAT], S[NMAT], B_b[NMAT], S_b[NMAT];

  b = 1.0;
  s = 0.0;
  for (m=0; m<N-Nmat; m++) {
    n = m + Nmat;
    b = __fdividef(b, 1.0+delta*L[n]);
    s = s + delta*b;
    B[m] = b;
    S[m] = s;
  }

  v = 0.0;
  for (m=0; m<N-Nmat; m++) {
    B_b[m] = 0;
    S_b[m] = 0;
  }

  for (n=0; n<Nopt; n++) {
    m = maturities[n] - 1;
    swapval = B[m] + swaprates[n]*S[m] - 1.0;
    if (swapval<0) {
      v      += -100*swapval;
      S_b[m] += -100*swaprates[n];
      B_b[m] += -100;
    }
  }

  for (m=N-Nmat-1; m>=0; m--) {
    n = m + Nmat;
    B_b[m] += delta*S_b[m];
    L_b[n]  = -B_b[m]*B[m]*__fdividef(delta, 1.0+delta*L[n]);
    if (m>0) {
      S_b[m-1] += S_b[m];
      B_b[m-1] += __fdividef(B_b[m], 1.0+delta*L[n]);
    }
  }

  // apply discount
  b = 1.0;
  for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]);
  v = b*v;

  for (n=0; n<Nmat; n++) {
    L_b[n] = -v*delta/(1.0+delta*L[n]);
  }
  for (n=Nmat; n<N; n++) {
    L_b[n] = b*L_b[n];
  }

  return v;
}

/* calculate the portfolio value v */

__device__ float portfolio(float *L)
{
  int   n, m, i;
  float v, b, s, swapval, B[NMAT], S[NMAT];

  b = 1.0;
  s = 0.0;
  for (n=Nmat; n<N; n++) {
    b = b/(1.0+delta*L[n]);
    s = s + delta*b;
    B[n-Nmat] = b;
    S[n-Nmat] = s;
  }

  v = 0.0;
  for (i=0; i<Nopt; i++) {
    m = maturities[i] - 1;
    swapval = B[m] + swaprates[i]*S[m] - 1.0;
    if (swapval<0) v += -100.0*swapval;
  }

  // apply discount
  b = 1.0;
  for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]);
  v = b*v;

  return v;
}

__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
  const int tid     = blockDim.x * blockIdx.x + threadIdx.x;
  const int threadN = blockDim.x * gridDim.x;

  int   i, path;
  float L[NN], L2[L2_SIZE], z[NN];
  float *L_b = L;   // L_b aliases L: after portfolio_b, L is reused for deltas

  /* Monte Carlo LIBOR path calculation */
  for (path = tid; path < NPATH; path += threadN) {
    // initialise the data for current thread
    for (i=0; i<N; i++) {
      // for real application, z should be randomly generated
      z[i] = 0.3;
      L[i] = 0.05;
    }
    path_calc_b1(L, z, L2);
    d_v[path]  = portfolio_b(L, L_b);
    path_calc_b2(L_b, z, L2);
    d_Lb[path] = L_b[NN-1];
  }
}

__global__ void Pathcalc_Portfolio_KernelGPU2(float *d_v)
{
  const int tid     = blockDim.x * blockIdx.x + threadIdx.x;
  const int threadN = blockDim.x * gridDim.x;

  int   i, path;
  float L[NN], z[NN];

  /* Monte Carlo LIBOR path calculation */
  for (path = tid; path < NPATH; path += threadN) {
    // initialise the data for current thread
    for (i=0; i<N; i++) {
      // for real application, z should be randomly generated
      z[i] = 0.3;
      L[i] = 0.05;
    }
    path_calc(L, z);
    d_v[path] = portfolio(L);
  }
}

////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////

int main(int argc, char **argv)
{
  // 'h_' prefix - CPU (host) memory space
  float *h_v, *h_Lb, h_lambda[NN], h_delta=0.25;
  int    h_N=NN, h_Nmat=NMAT, h_Nopt=NOPT, i;
  int    h_maturities[] = {4,4,4,8,8,8,20,20,20,28,28,28,40,40,40};
  float  h_swaprates[]  = {.045,.05,.055,.045,.05,.055,.045,.05,
                           .055,.045,.05,.055,.045,.05,.055};
  double v, Lb;

  unsigned int hTimer;
  double gpuTime;

  // 'd_' prefix - GPU (device) memory space
  float *d_v, *d_Lb;

  // initialise card and timer
  int deviceCount;
  CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
  if (deviceCount == 0) {
    fprintf(stderr, "There is no device.\n");
    exit(EXIT_FAILURE);
  }
  int dev;
  for (dev = 0; dev < deviceCount; ++dev) {
    cudaDeviceProp deviceProp;
    CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
    if (deviceProp.major >= 1) break;
  }
  if (dev == deviceCount) {
    fprintf(stderr, "There is no device supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }
  else
    CUDA_SAFE_CALL(cudaSetDevice(dev));

  CUT_SAFE_CALL( cutCreateTimer(&hTimer) );

  for (i=0; i<NN; i++) h_lambda[i] = 0.2;

  // Copy all constants into constant memory
  cudaMemcpyToSymbol(N,          &h_N,          sizeof(h_N));
  cudaMemcpyToSymbol(Nmat,       &h_Nmat,       sizeof(h_Nmat));
  cudaMemcpyToSymbol(Nopt,       &h_Nopt,       sizeof(h_Nopt));
  cudaMemcpyToSymbol(delta,      &h_delta,      sizeof(h_delta));
  cudaMemcpyToSymbol(maturities, &h_maturities, sizeof(h_maturities));
  cudaMemcpyToSymbol(swaprates,  &h_swaprates,  sizeof(h_swaprates));
  cudaMemcpyToSymbol(lambda,     &h_lambda,     sizeof(h_lambda));

  // Allocate memory on host and device
  h_v  = (float *)malloc(sizeof(float)*NPATH);
  CUDA_SAFE_CALL( cudaMalloc((void **)&d_v,  sizeof(float)*NPATH) );
  h_Lb = (float *)malloc(sizeof(float)*NPATH);
  CUDA_SAFE_CALL( cudaMalloc((void **)&d_Lb, sizeof(float)*NPATH) );

  // Execute GPU kernel -- no Greeks
  CUDA_SAFE_CALL( cudaThreadSynchronize() );
  CUT_SAFE_CALL( cutResetTimer(hTimer) );
  CUT_SAFE_CALL( cutStartTimer(hTimer) );

  // Set up the execution configuration
  dim3 dimBlock(BLOCK_SIZE);
  dim3 dimGrid(GRID_SIZE);

  // Launch the device computation threads
  Pathcalc_Portfolio_KernelGPU2<<<dimGrid, dimBlock>>>(d_v);
  CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU2() execution failed\n");
  CUDA_SAFE_CALL( cudaThreadSynchronize() );

  // Read back GPU results and compute average
  CUDA_SAFE_CALL( cudaMemcpy(h_v, d_v, sizeof(float)*NPATH,
                             cudaMemcpyDeviceToHost) );
  CUT_SAFE_CALL( cutStopTimer(hTimer) );
  gpuTime = cutGetTimerValue(hTimer);

  v = 0.0;
  for (i=0; i<NPATH; i++) v += h_v[i];
  v = v / NPATH;

  printf("v  = %15.8f\n", v);
  printf("Time (No Greeks) : %f msec\n", gpuTime);

  // Execute GPU kernel -- Greeks
  CUDA_SAFE_CALL( cudaThreadSynchronize() );
  CUT_SAFE_CALL( cutResetTimer(hTimer) );
  CUT_SAFE_CALL( cutStartTimer(hTimer) );

  // Launch the device computation threads
  Pathcalc_Portfolio_KernelGPU<<<dimGrid, dimBlock>>>(d_v, d_Lb);
  CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU() execution failed\n");
  CUDA_SAFE_CALL( cudaThreadSynchronize() );

  // Read back GPU results and compute averages
  CUDA_SAFE_CALL( cudaMemcpy(h_v,  d_v,  sizeof(float)*NPATH,
                             cudaMemcpyDeviceToHost) );
  CUDA_SAFE_CALL( cudaMemcpy(h_Lb, d_Lb, sizeof(float)*NPATH,
                             cudaMemcpyDeviceToHost) );
  CUT_SAFE_CALL( cutStopTimer(hTimer) );
  gpuTime = cutGetTimerValue(hTimer);

  v = 0.0;
  for (i=0; i<NPATH; i++) v += h_v[i];
  v = v / NPATH;

  Lb = 0.0;
  for (i=0; i<NPATH; i++) Lb += h_Lb[i];
  Lb = Lb / NPATH;

  printf("v  = %15.8f\n", v);
  printf("Lb = %15.8f\n", Lb);
  printf("Time (Greeks)    : %f msec\n", gpuTime);

  // Release GPU memory
  CUDA_SAFE_CALL( cudaFree(d_v) );
  CUDA_SAFE_CALL( cudaFree(d_Lb) );

  // Release CPU memory
  free(h_v);
  free(h_Lb);

  CUT_SAFE_CALL( cutDeleteTimer(hTimer) );

  //CUT_EXIT(argc, argv);

  return 0;
}
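/* --------------------------------------------------------------------
   Hedged sketch, not part of the original program: the kernels above
   fill z[] with the constant 0.3 and note that "for real application,
   z should be randomly generated". The variant below shows one way to
   do that with cuRAND's device API. The kernel name
   Pathcalc_Portfolio_KernelGPU2_rng and the seed parameter are
   illustrative assumptions, not names from the original source.
   -------------------------------------------------------------------- */

#include <curand_kernel.h>

__global__ void Pathcalc_Portfolio_KernelGPU2_rng(float *d_v,
                                                  unsigned long long seed)
{
  const int tid     = blockDim.x * blockIdx.x + threadIdx.x;
  const int threadN = blockDim.x * gridDim.x;

  // one RNG state per thread; using the thread index as the subsequence
  // gives each thread a statistically independent stream
  curandState state;
  curand_init(seed, tid, 0, &state);

  int   i, path;
  float L[NN], z[NN];

  for (path = tid; path < NPATH; path += threadN) {
    for (i=0; i<N; i++) {
      z[i] = curand_normal(&state);   // standard normal driver
      L[i] = 0.05f;
    }
    path_calc(L, z);
    d_v[path] = portfolio(L);
  }
}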
f6a2a7b58aa84a70b63086c117e4d9c054852cb7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gradient2d-512-16-512_kernel.hu"

__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }

__global__ void kernel0_16(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0);
  const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
  const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1;
  const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 16;
  const AN5D_TYPE __side1Len = 512;
  const AN5D_TYPE __side2Len = 480;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
  const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
  const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
  const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  double __reg_0_0; double __reg_0_1; double __reg_0_2;
  double __reg_1_0; double __reg_1_1; double __reg_1_2;
  double __reg_2_0; double __reg_2_1; double __reg_2_2;
  double __reg_3_0; double __reg_3_1; double __reg_3_2;
  double __reg_4_0; double __reg_4_1; double __reg_4_2;
  double __reg_5_0; double __reg_5_1; double __reg_5_2;
  double __reg_6_0; double __reg_6_1; double __reg_6_2;
  double __reg_7_0; double __reg_7_1; double __reg_7_2;
  double __reg_8_0; double __reg_8_1; double __reg_8_2;
  double __reg_9_0; double __reg_9_1; double __reg_9_2;
  double __reg_10_0; double __reg_10_1; double __reg_10_2;
  double __reg_11_0; double __reg_11_1; double __reg_11_2;
  double __reg_12_0; double __reg_12_1; double __reg_12_2;
  double __reg_13_0; double __reg_13_1; double __reg_13_2;
  double __reg_14_0; double __reg_14_1; double __reg_14_2;
  double __reg_15_0; double __reg_15_1; double __reg_15_2;
  __shared__ double __b_sb_double[__blockSize * 2];
  double *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
  const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
  const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
  const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
  const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
  const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
  const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
  const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
  const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16);
  const AN5D_TYPE __storeValid = __writeValid16;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC15(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid15) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0)
  {
    __LOAD(__reg_15_0, 0);
    __LOAD(__reg_0_1, 1);
    __LOAD(__reg_0_2, 2);
    __CALC1(__reg_1_1, __reg_15_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3);
    __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
    __CALC2(__reg_2_1, __reg_15_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4);
    __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
    __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
    __CALC3(__reg_3_1, __reg_15_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5);
    __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
    __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
    __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
    __CALC4(__reg_4_1, __reg_15_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6);
    __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
    __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
    __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_15_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_15_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_15_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_15_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_15_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_15_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_15_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_15_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_15_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_15_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_15_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(1, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, 
__reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(2, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(3, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(4, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(5, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); 
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(6, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(7, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(8, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(9, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(10, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(11, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(12, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(13, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(14, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(15, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); 
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_0_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, 
__reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, 
__reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_0_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, 
__reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, 
__reg_15_2, __reg_15_0, __reg_15_1); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_0_1); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_15_0); __STORE(__h + 0, __reg_15_2, __reg_15_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } } __global__ void kernel0_15(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const 
AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __storeValid = __writeValid15; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 
0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_14_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_14_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_14_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_14_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); 
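/* __CALCEXPR is the per-point update applied at every fused step:
   out = center + 1 / sqrt(1e-4 + squared differences to the four neighbours),
   with the vertical neighbours (__a, __c) taken from registers and the
   horizontal ones read from the shared row __b_sb. __DB_SWITCH ping-pongs
   between the two halves of __b_sb_double so the row a stage writes never
   races with the row the next stage still reads, and threads inside the k-th
   halo ring fail __writeValid<k> and simply forward reg1 unchanged.

   A minimal host-side launch sketch (an assumption -- the generated host
   driver is not part of this file). The shapes follow the constants above:
   blockDim.x must equal __side2LenOl = 482 + 2*15 = 512, with one block per
   (512 x 482) output tile, and A is assumed to hold the two time planes back
   to back (2 * dimsize * dimsize doubles), selected inside the kernel by
   c0 % 2:

       const int c1Len = dimsize - 2, c2Len = dimsize - 2;
       const int side1Num = (c1Len + 512 - 1) / 512;
       const int side2Num = (c2Len + 482 - 1) / 482;
       dim3 grid(side1Num * side2Num, 1), block(512, 1);
       for (int c0 = 0; c0 + 15 <= timestep; c0 += 15)   // 15 fused steps per launch
           hipLaunchKernelGGL(kernel0_15, grid, block, 0, 0, d_A, dimsize, timestep, c0);
       // any timestep % 15 remainder would go to a smaller generated variant
*/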
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_14_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_14_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_14_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_14_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_14_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_14_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_14_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_14_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_14_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_14_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_14_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); 
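/* Warm-up of the top tile (__c1Id == 0) is now complete: the pipeline is full,
   and __STORE(1, ...) above wrote the first fully updated interior row. From
   here each additional __LOAD feeds exactly one __STORE (the store just below
   writes row c1 == 2). */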
__STORE(2, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(3, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(4, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(5, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(6, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(7, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(8, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(9, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(10, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, 
__reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(11, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(12, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(13, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(14, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, 
__reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); 
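/* Tiles other than the top one (__c1Id != 0) take this else branch: the same
   warm-up, but without the boundary-clamped first row, and stage k only fires
   once 2k+1 rows of the tile are resident. The 15 overlap rows on each side
   of the tile (__OlLen1) are loaded and recomputed redundantly, which is what
   lets every block fuse all 15 time steps without talking to its neighbours. */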
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, 
__reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); 
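/* Each __CALC<k> goes through __CALCSETUP (buffer switch, shared-memory row
   store, __syncthreads), so producing one fully updated row costs one barrier
   per stage. Registers carry the vertical dependencies between stages; shared
   memory is only needed for the east/west neighbours within a row. */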
__LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 31; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, 
__reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); 
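/* Bottom-tile drain (no rows left to load): the final halo row, still in
   __reg_0_0, is substituted for the missing lower neighbour while each
   remaining stage is flushed and stored, one output row per step. */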
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); 
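/* Residue case __h + 1: the single remaining row (__reg_0_1) has just taken a
   full pass through all stages; from here the drain substitutes it for the
   missing lower neighbour exactly as in the __h + 0 case. */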
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __STORE(__h - 2, 
__reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
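/* Residue case __h + 2: the last two rows each take a full pass (stores
   __h - 15 and __h - 14) before the drain flushes the pipeline with __reg_0_2
   as the boundary value. */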
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, 
__reg_14_0, __reg_14_1); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h + 0, __reg_14_0, __reg_14_1, __reg_0_2); } } else { for (__h = 31; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
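/* Interior-tile epilogue: the last one to three rows are handled singly, with
   an __h == __side1LenOl check before each further load so the sweep stops
   exactly at the tile edge. */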
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++;
}
}

/* kernel0_14: same register-pipelined (AN5D-style) stencil as the preceding
   kernel, but fusing 14 time steps per sweep (__side0Len = 14). Each block
   computes a 512-row by 484-column output tile; the 14-column overlap on each
   side (__OlLen2) widens the block to __side2LenOl = 512 threads. */
__global__ void kernel0_14(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 14;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
/* Three rotating registers per pipeline stage (stages 0..13). */
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2;
/* Double-buffered shared-memory row, flipped by __DB_SWITCH(). */
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
/* Column-validity masks: stage k may only write where at least k halo columns
   remain on each side of the block; __storeValid is the innermost mask. */
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __storeValid = __writeValid14;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* One fused time step at centre point b, with the row above (a) and the row
   below (c) in registers and the left/right neighbours (l, r) read from the
   shared row: out = b + 1 / sqrt(1e-4 + (b-a)^2 + (b-c)^2 + (b-r)^2 + (b-l)^2). */
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
/* Top tile: row 0 stays in __reg_13_0 and is re-injected as the missing upper
   neighbour each time a deeper stage starts up. */
if (__c1Id == 0)
{
__LOAD(__reg_13_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_13_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_13_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_13_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_13_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_13_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_13_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_13_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_13_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_13_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_13_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_13_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_13_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); 
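/* Pipeline fill for the top tile: by row 14 all thirteen __CALC stages are
   live, and the first output row is emitted by __STORE(1) once row 15 has
   been loaded. */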
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_13_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(1, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(3, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); 
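/* Steady fill cadence from here: one row loaded, one row stored per step,
   until the top-tile prologue ends at __STORE(14). */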
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(4, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(5, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(6, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(7, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(8, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(9, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(10, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(11, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(12, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); 
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(13, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, 
__reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, 
__reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
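/* Interior tiles get no boundary row to reuse, so each deeper stage simply
   starts two rows after the previous one; the __OlLen1-row overlap this
   implies is recomputed by both adjacent tiles. */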
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
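// --- (last block of the c1 sweep: the streaming loop is unrolled three-fold so the three-register rotation returns to its starting phase on every trip)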
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); 
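// --- Bottom-boundary drain: no further rows exist, so the last loaded row (__reg_0_1) is re-fed as the lower neighbour while stages 7-13 flush their pending results.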
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, 
__reg_13_1, __reg_13_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); 
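// --- Drain for the one-extra-row case: stages 11-13 flush against the boundary row held in __reg_0_2.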
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, 
__reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, 
__reg_13_2, __reg_13_0); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h + 0, __reg_13_1, __reg_13_2, __reg_0_0); } } else { for (__h = 29; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); 
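// --- Non-last blocks: after the unrolled loop, up to three leftover rows are handled one at a time, returning as soon as __h reaches the overlapped block height __side1LenOl.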
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++;
}
}

// --- kernel0_13: the same register-streaming stencil sweep, fusing 13 time steps (__side0Len = 13: 12 __CALCk stages plus the final __STORE). The preprocessor
// --- directives below have been restored to their own lines (they cannot share a physical line with other code); their content is unchanged.
// --- Host-side launch sketch (illustrative assumption, not emitted by the generator):
// ---   dim3 block(__side2LenOl);            // one thread per c2 column of the overlapped tile: 486 + 2 * 13 = 512
// ---   dim3 grid(__side1Num * __side2Num);  // c1 tiles x c2 tiles, flattened into blockIdx.x
// ---   kernel0_13<<<grid, block>>>(A, dimsize, timestep, c0);
__global__ void kernel0_13(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 486;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __storeValid = __writeValid13;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
// --- First block of the c1 sweep (__c1Id == 0): row 0 stays in __reg_12_0 and is re-fed as the upper neighbour of each stage's first evaluation while the pipeline fills.
if (__c1Id == 0) { __LOAD(__reg_12_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
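// --- Row 14 completes stage 12; __STORE(1, ...) just below emits the first output row (row 0 is the fixed boundary).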
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(3, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(4, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(5, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
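// --- (first-block warm-up now streams: each loaded row h immediately yields output row h - 13)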
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(6, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(7, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(8, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(9, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(10, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(11, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(12, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); 
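// --- Prologue for non-first blocks: rows 20-26 finish filling the pipeline next; its only store is __STORE(13, ...) at the end.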
__LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, 
__reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, 
__reg_12_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, 
__reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++;
}
}

__global__ void kernel0_12(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
  const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
  const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
  const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
  const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
  const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488;
  const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
  const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
  const AN5D_TYPE __blockSize = 1 * __side2LenOl;
  const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
  const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
  const AN5D_TYPE __local_c2 = __tid;
  const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
  const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
  double __reg_0_0; double __reg_0_1; double __reg_0_2;
  double __reg_1_0; double __reg_1_1; double __reg_1_2;
  double __reg_2_0; double __reg_2_1; double __reg_2_2;
  double __reg_3_0; double __reg_3_1; double __reg_3_2;
  double __reg_4_0; double __reg_4_1; double __reg_4_2;
  double __reg_5_0; double __reg_5_1; double __reg_5_2;
  double __reg_6_0; double __reg_6_1; double __reg_6_2;
  double __reg_7_0; double __reg_7_1; double __reg_7_2;
  double __reg_8_0; double __reg_8_1; double __reg_8_2;
  double __reg_9_0; double __reg_9_1; double __reg_9_2;
  double __reg_10_0; double __reg_10_1; double __reg_10_2;
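  // NOTE: each __reg_<s>_<i> triple is a 3-row rotating window for pipeline stage s;
  // kernel0_12 fuses 12 time steps per sweep (11 in-register __CALC stages plus the
  // final __STORE). This file contains no launch site for these kernels, so the shape
  // below is only an inference from the index math above, not a confirmed configuration:
  //   dim3 block(512, 1, 1);                     // __side2LenOl = 488 + 2 * 12 threads
  //   dim3 grid(__side1Num * __side2Num, 1, 1);  // overlapped tiles along c1 and c2
  //   kernel0_12<<<grid, block>>>(A, dimsize, timestep, c0);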
  double __reg_11_0; double __reg_11_1; double __reg_11_2;
  __shared__ double __b_sb_double[__blockSize * 2];
  double *__b_sb = __b_sb_double;
  const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
  const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
  const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
  const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
  const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
  const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
  const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
  const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
  const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
  const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
  const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
  const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
  const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
  const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
  const AN5D_TYPE __storeValid = __writeValid12;
  AN5D_TYPE __c1;
  AN5D_TYPE __h;
  const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
  if (__c1Id == 0) {
    __LOAD(__reg_11_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
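    // The generated point update is not the plain wave-equation stencil of the hand-written
    // kernels: __CALCEXPR computes b + 1 / sqrt(1e-4 + sum of squared differences to the four
    // neighbours), with the c1-direction neighbours held in registers and the c2-direction
    // neighbours read from the double-buffered shared-memory line __b_sb.
    // Warm-up for the top boundary block (__c1Id == 0): each __LOAD pushes the older rows one
    // __CALC stage deeper, and __reg_11_0 keeps the cached row-0 value alive as the "above"
    // argument of each stage's first boundary update.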
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); 
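// From __STORE(1, ...) onward the pipeline is full at the leading stages: every further
// __LOAD lets the deepest stage retire one fully updated row.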
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(2, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(3, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(4, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(5, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(6, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(7, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(8, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(9, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(10, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(11, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
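// Interior blocks (the else branch) prime without boundary shortcuts: stage s starts only
// once its 3-row window is resident, so nothing is committed until all 11 stages are live
// (the first commit is __STORE(12, ...) after row 24 is loaded).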
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
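// After priming, rows are streamed three per loop iteration. Blocks covering the bottom
// boundary (__c1Id == __side1Num - 1) drain with specialized epilogues for the last 0/1/2
// leftover rows; interior blocks simply loop until __h reaches __side1LenOl.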
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, 
__reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, 
__reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++;
}
// --- Epilogue: drain the at-most-two rows left over after the 3x-unrolled steady-state loop.
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++;
}
}

/**************************************************************/
/* AN5D-STYLE GENERATED STENCIL KERNEL - 11 FUSED TIME STEPS  */
/**************************************************************/
// --- kernel0_11 applies the 5-point stencil of __CALCEXPR for 11 consecutive
// --- time steps in a single sweep. Each thread owns one c2 column and streams
// --- rows along c1 through an 11-stage register pipeline (__CALC1..__CALC10
// --- plus the final __CALCEXPR inside __STORE); lateral c2 neighbours are
// --- exchanged through the double-buffered shared array __b_sb.
__global__ void kernel0_11(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
    const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
    const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
    const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
    const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
    // --- Tile geometry: 512 threads per block stream the c2 tile; the 11-deep
    // --- halo leaves 490 columns with fully valid results (490 + 2*11 = 512).
    const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 490;
    const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
    const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
    const AN5D_TYPE __blockSize = 1 * __side2LenOl;
    const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
    const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
    const AN5D_TYPE __local_c2 = __tid;
    const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
    const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
    // --- Three registers per pipeline stage hold the rows above, at, and below the stencil centre.
    double __reg_0_0; double __reg_0_1; double __reg_0_2;
    double __reg_1_0; double __reg_1_1; double __reg_1_2;
    double __reg_2_0; double __reg_2_1; double __reg_2_2;
    double __reg_3_0; double __reg_3_1; double __reg_3_2;
    double __reg_4_0; double __reg_4_1; double __reg_4_2;
    double __reg_5_0; double __reg_5_1; double __reg_5_2;
    double __reg_6_0; double __reg_6_1; double __reg_6_2;
    double __reg_7_0; double __reg_7_1; double __reg_7_2;
    double __reg_8_0; double __reg_8_1; double __reg_8_2;
    double __reg_9_0; double __reg_9_1; double __reg_9_2;
    double __reg_10_0; double __reg_10_1; double __reg_10_2;
    __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
    const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
    const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
    // --- Each deeper stage is valid on a c2 range narrowed by one more halo cell per side.
    const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
    const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
    const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
    const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
    const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
    const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
    const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
    const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
    const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
    const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
    const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
    const AN5D_TYPE __storeValid = __writeValid11;
    AN5D_TYPE __c1;
    AN5D_TYPE __h;
    const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
    // --- First tile along c1 (__c1Id == 0): row 0 is reused as the upper
    // --- neighbour while the pipeline warms up.
    if (__c1Id == 0) {
    __LOAD(__reg_10_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2);
    __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0);
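// --- Warm-up continues: each newly loaded row activates one more pipeline stage;
// --- the first __STORE fires once row 12 has been loaded.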
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(1, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(3, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
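// --- Pipeline full from row 12 onward: each additional __LOAD completes all
// --- 11 fused time steps for one row and emits one __STORE.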
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(4, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(5, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(6, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(7, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(8, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(9, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, 
__reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(10, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, 
__reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, 
__reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, 
__reg_10_2, __reg_10_0); __h++;
}
}

/**************************************************************/
/* AN5D-STYLE GENERATED STENCIL KERNEL - 10 FUSED TIME STEPS  */
/**************************************************************/
// --- kernel0_10 is structurally identical to kernel0_11 with one fewer
// --- pipeline stage (__CALC1..__CALC9 plus the final __CALCEXPR inside
// --- __STORE), so its tile keeps a slightly wider useful c2 width
// --- (492 + 2*10 = 512 threads per block).
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
    const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
    const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
    const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
    const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
    const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492;
    const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
    const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
    const AN5D_TYPE __blockSize = 1 * __side2LenOl;
    const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
    const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
    const AN5D_TYPE __local_c2 = __tid;
    const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
    const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
    double __reg_0_0; double __reg_0_1; double __reg_0_2;
    double __reg_1_0; double __reg_1_1; double __reg_1_2;
    double __reg_2_0; double __reg_2_1; double __reg_2_2;
    double __reg_3_0; double __reg_3_1; double __reg_3_2;
    double __reg_4_0; double __reg_4_1; double __reg_4_2;
    double __reg_5_0; double __reg_5_1; double __reg_5_2;
    double __reg_6_0; double __reg_6_1; double __reg_6_2;
    double __reg_7_0; double __reg_7_1; double __reg_7_2;
    double __reg_8_0; double __reg_8_1; double __reg_8_2;
    double __reg_9_0; double __reg_9_1; double __reg_9_2;
    __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
    const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
    const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
    const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
    const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
    const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
    const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
    const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
    const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
    const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
    const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
    const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
    const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
    const AN5D_TYPE __storeValid = __writeValid10;
    AN5D_TYPE __c1;
    AN5D_TYPE __h;
    const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
    // --- First tile along c1: as in kernel0_11, row 0 seeds the upper
    // --- neighbour of each stage during warm-up.
    if (__c1Id == 0) {
    __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2);
    __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2);
    __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2);
    __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2);
    __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2);
    __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2,
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
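// --- 10-stage variant: the pipeline filled at row 11, so stores trail loads
// --- by ten rows (__STORE(__h - 10, ...) in the steady state).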
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); 
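// --- Interior c1 tiles (__c1Id != 0): there is no boundary row to reuse, so
// --- the pipeline is primed entirely from loaded rows and the first __STORE
// --- (row 10) is deferred until row 20 has been loaded.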
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
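// --- Pipeline fill pattern: __CALCk first fires once row 2k has been loaded, since each
// --- stage consumes a three-row window of the stage below it.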
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); 
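// --- One trailing row remained (__h + 1 case): after folding it in, the last loaded row
// --- (__reg_0_0) is reused as the clamped bottom boundary while stages 2..9 drain.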
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); 
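// --- Two trailing rows (__h + 2 case): both are loaded normally, then __reg_0_1 (the
// --- final row) clamps the remaining stages as they flush.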
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; 
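// --- Tiles that are not last along c1 run this 3x-unrolled steady-state loop; the unroll
// --- factor matches the period-3 rotation of the register names (reg_k_0 -> reg_k_1 -> reg_k_2).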
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - 
__OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
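// --- kernel0_9 fuses 9 time steps per sweep: 8 pipelined __CALC stages plus the final
// --- __CALCEXPR applied inside __STORE. Presumably the narrower variants (9, 8, ...)
// --- exist so the host driver (not shown in this file) can handle leftover time steps.
// --- In this first-tile branch, __reg_8_0 carries the clamped row 0.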
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
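// --- Last-tile drain loop of kernel0_9: stores trail loads by 9 rows (__STORE(__h - 9, ...))
// --- until only the clamped bottom rows remain.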
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } } __global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * 
dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, 
__reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); 
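// --- kernel0_8, first tile: 7 __CALC stages plus __STORE's __CALCEXPR fuse 8 time steps;
// --- __reg_7_0 holds the clamped row 0 during warm-up.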
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); 
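// --- Interior row-blocks (this else branch) prime the same pipeline as the __c1Id == 0 branch above,
// --- but emit no early __STOREs: their leading rows fall in the __OlLen1 overlap with the neighboring
// --- block's tile and are recomputed rather than written, so storing starts only once the pipeline is full.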
__LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + 
__halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, 
__reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; }
}

/************************************************************/
/* TEMPORALLY BLOCKED STENCIL KERNEL - 7 TIME STEPS / SWEEP */
/************************************************************/
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2;
__shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1,
__reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); 
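// --- Pipeline drain for the last row-block: there are no further rows to __LOAD, so the remaining
// --- stages are flushed by re-feeding the final boundary row (__reg_0_2 here) as the lower neighbor
// --- while the deferred __STOREs catch up.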
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; }
}

/************************************************************/
/* TEMPORALLY BLOCKED STENCIL KERNEL - 6 TIME STEPS / SWEEP */
/************************************************************/
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2;
__shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1,
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; }
}

/************************************************************/
/* TEMPORALLY BLOCKED STENCIL KERNEL - 5 TIME STEPS / SWEEP */
/************************************************************/
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2;
__shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
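// --- Shared-memory double buffering: every __CALCk/__STORE begins with __CALCSETUP, which flips
// --- __b_sb between the two halves of __b_sb_double (__DB_SWITCH) and publishes the middle row, so
// --- each phase reads its left/right neighbors from a half that no other phase is concurrently writing.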
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 
= __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
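/* ---------------------------------------------------------------------------
 * Illustrative launch-geometry sketch (NOT part of the generated kernels
 * above; the actual launch parameters come from AN5D's separately generated
 * host driver). Each kernel0_k fuses k time steps per launch: the overlapped
 * tile shrinks by 2*k along c2 (__side2Len == 512 - 2*k), so __blockSize ==
 * __side2LenOl stays 512 for every k in this file, and blockIdx.x encodes the
 * (c1, c2) tile pair as __c1Id * __side2Num + (blockIdx.x % __side2Num).
 * The helper name below is hypothetical.
 * ------------------------------------------------------------------------ */
static inline void launch_kernel0_5_once(double *d_A, int dimsize, int timestep, int c0)
{
    const unsigned c1Len = (unsigned)dimsize - 2;      // __c1Len (__halo1 == 1)
    const unsigned c2Len = (unsigned)dimsize - 2;      // __c2Len (__halo2 == 1)
    const unsigned side1Num = (c1Len + 512 - 1) / 512; // tiles along c1 (__side1Len == 512)
    const unsigned side2Num = (c2Len + 502 - 1) / 502; // tiles along c2 (__side2Len == 512 - 2*5)
    dim3 grid(side1Num * side2Num, 1, 1);              // 1D grid, decoded inside the kernel
    dim3 block(512, 1, 1);                             // __blockSize == __side2LenOl == 502 + 2*5
    // One launch advances 5 time steps: it reads plane (c0 % 2) of the
    // A[2][dimsize][dimsize] ping-pong buffer and writes plane ((c0 + 1) % 2).
    kernel0_5<<<grid, block>>>(d_A, dimsize, timestep, c0);
}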
// f6a2a7b58aa84a70b63086c117e4d9c054852cb7.cu
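/* Reference form of the point update evaluated by every __CALCEXPR in the
 * kernels below: a gradient-style stencil that adds the inverse square root
 * of (eps + the sum of squared differences with the four neighbors) to the
 * center value. This scalar helper is an illustrative sketch only -- the
 * generated kernels never call it; they keep the c1-direction neighbors in
 * registers (__REGREF) and the c2-direction neighbors in double-buffered
 * shared memory (__SBREF) instead. The helper name is hypothetical. */
__device__ __forceinline__ double gradient2d_point(double up, double mid, double down,
                                                   double left, double right)
{
    const double du = mid - up,   dd = mid - down;
    const double dl = mid - left, dr = mid - right;
    // Matches __CALCEXPR: mid + 1.0f / sqrt(0.0001f + du^2 + dd^2 + dr^2 + dl^2)
    return mid + 1.0f / sqrt(0.0001f + du * du + dd * dd + dr * dr + dl * dl);
}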
#include "gradient2d-512-16-512_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_16(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; double __reg_15_0; double __reg_15_1; double __reg_15_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && 
__local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16); const AN5D_TYPE __storeValid = __writeValid16; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC15(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid15) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_15_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_15_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_15_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_15_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_15_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_15_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_15_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_15_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_15_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_15_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_15_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_15_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_15_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_15_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_15_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_15_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(1, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, 
__reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(2, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(3, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(4, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(5, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); 
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(6, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(7, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(8, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(9, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(10, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(11, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(12, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(13, __reg_15_0, __reg_15_1, __reg_15_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); 
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(14, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(15, __reg_15_2, __reg_15_0, __reg_15_1); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
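// --- Interior tiles (else branch): the pipeline is refilled from plane 0 with no stores;
// --- deeper stages join in as enough planes are buffered, and the first completed row is
// --- written by the single __STORE(16, ...) that closes this prologue.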
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
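// --- The unrolling proceeds three planes at a time so the cyclic register triples
// --- (__reg_k_0 -> __reg_k_1 -> __reg_k_2) return to the same names at the top of the
// --- steady-state loops below; the rotation replaces register-to-register copies.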
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(16, __reg_15_0, __reg_15_1, __reg_15_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); 
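// --- Pipeline drain: once the last plane of this tile has been loaded, the boundary
// --- value (here carried in __reg_0_2) is re-fed to each remaining stage and the rows
// --- still in flight are flushed, one stage per __STORE.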
__CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_0_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, 
__reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, 
__reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, __reg_15_2, __reg_15_0, __reg_15_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_0_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 15, __reg_15_2, __reg_15_0, __reg_15_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 14, __reg_15_0, __reg_15_1, __reg_15_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 13, __reg_15_1, __reg_15_2, __reg_15_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 12, __reg_15_2, __reg_15_0, __reg_15_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, 
__reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 11, __reg_15_0, __reg_15_1, __reg_15_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 10, __reg_15_1, __reg_15_2, __reg_15_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 9, __reg_15_2, __reg_15_0, __reg_15_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 8, __reg_15_0, __reg_15_1, __reg_15_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 7, __reg_15_1, __reg_15_2, __reg_15_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 6, __reg_15_2, __reg_15_0, __reg_15_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 5, __reg_15_0, __reg_15_1, __reg_15_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 4, __reg_15_1, __reg_15_2, __reg_15_0); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 3, 
__reg_15_2, __reg_15_0, __reg_15_1); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 2, __reg_15_0, __reg_15_1, __reg_15_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_0_1); __STORE(__h - 1, __reg_15_1, __reg_15_2, __reg_15_0); __STORE(__h + 0, __reg_15_2, __reg_15_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, 
__reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __CALC15(__reg_15_0, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h - 16, __reg_15_1, __reg_15_2, __reg_15_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __CALC15(__reg_15_1, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 16, __reg_15_2, __reg_15_0, __reg_15_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __CALC15(__reg_15_2, __reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 16, __reg_15_0, __reg_15_1, __reg_15_2); __h++;
}
}

__global__ void kernel0_15(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 15;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 482;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2;
double __reg_1_0; double __reg_1_1; double __reg_1_2;
double __reg_2_0; double __reg_2_1; double __reg_2_2;
double __reg_3_0; double __reg_3_1; double __reg_3_2;
double __reg_4_0; double __reg_4_1; double __reg_4_2;
double __reg_5_0; double __reg_5_1; double __reg_5_2;
double __reg_6_0; double __reg_6_1; double __reg_6_2;
double __reg_7_0; double __reg_7_1; double __reg_7_2;
double __reg_8_0; double __reg_8_1; double __reg_8_2;
double __reg_9_0; double __reg_9_1; double __reg_9_2;
double __reg_10_0; double __reg_10_1; double __reg_10_2;
double __reg_11_0; double __reg_11_1; double __reg_11_2;
double __reg_12_0; double __reg_12_1; double __reg_12_2;
double __reg_13_0; double __reg_13_1; double __reg_13_2;
double __reg_14_0; double __reg_14_1; double __reg_14_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
const AN5D_TYPE __storeValid = __writeValid15;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC14(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid14) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_14_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_14_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_14_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_14_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
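// --- kernel0_15 applies the same register-pipelining scheme with 15 fused time steps.
// --- In this top-tile prologue, __reg_14_0 keeps plane 0 alive as the fixed upper halo
// --- and seeds the first row of every stage (the __CALCk(..., __reg_14_0, ...) calls).
// --- Per grid point, __CALCEXPR evaluates
// ---     out = b + 1 / sqrt(0.0001 + (b - a)^2 + (b - c)^2 + (b - l)^2 + (b - r)^2),
// --- where a/c are the rows above/below held in registers and l/r are the lateral
// --- neighbours read from the double-buffered shared-memory line __b_sb.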
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_14_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_14_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_14_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_14_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_14_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_14_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_14_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_14_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_14_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_14_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_14_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(1, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); 
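// --- Pipeline full: from here every __LOAD(h) is paired with a __STORE(h - 15), so each value written back has advanced 15 time steps in a single sweep over the data.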
__STORE(2, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(3, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(4, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(5, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(6, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(7, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(8, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(9, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(10, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, 
__reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(11, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(12, __reg_14_2, __reg_14_0, __reg_14_1); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(13, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(14, __reg_14_1, __reg_14_2, __reg_14_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, 
__reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); 
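// --- Tiles that do not touch the c1 boundary: plain warm-up over the 15-row overlap region; nothing is stored until all stages are primed.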
__CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, 
__reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); 
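// --- Rows 28..30 bring the deepest stages (__CALC14 and the store stage) online and complete the warm-up.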
__LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 29); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(15, __reg_14_2, __reg_14_0, __reg_14_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 31; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, 
__reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); 
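// --- Pipeline drain: with no rows left to load, the remaining stages are flushed one per step, reusing the last loaded row (__reg_0_0 here) as the halo value.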
__CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_0_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); 
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, __reg_14_0, __reg_14_1); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_0_1); __STORE(__h - 2, 
__reg_14_1, __reg_14_2, __reg_14_0); __STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 14, __reg_14_1, __reg_14_2, __reg_14_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 13, __reg_14_2, __reg_14_0, __reg_14_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 12, __reg_14_0, __reg_14_1, __reg_14_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
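// --- Remainder case (__h + 2): two extra rows are loaded before draining, so the flush sequence below runs two rows further than the __h + 0 case.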
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 11, __reg_14_1, __reg_14_2, __reg_14_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 10, __reg_14_2, __reg_14_0, __reg_14_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 9, __reg_14_0, __reg_14_1, __reg_14_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 8, __reg_14_1, __reg_14_2, __reg_14_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 7, __reg_14_2, __reg_14_0, __reg_14_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 6, __reg_14_0, __reg_14_1, __reg_14_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 5, __reg_14_1, __reg_14_2, __reg_14_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 4, __reg_14_2, 
__reg_14_0, __reg_14_1); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 3, __reg_14_0, __reg_14_1, __reg_14_2); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 2, __reg_14_1, __reg_14_2, __reg_14_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_0_2); __STORE(__h - 1, __reg_14_2, __reg_14_0, __reg_14_1); __STORE(__h + 0, __reg_14_0, __reg_14_1, __reg_0_2); } } else { for (__h = 31; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
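// --- Tiles other than the last one stop exactly at __side1LenOl; the guarded iterations here emit the final overlap rows one at a time.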
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __CALC14(__reg_14_2, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 15, __reg_14_0, __reg_14_1, __reg_14_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __CALC14(__reg_14_0, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 15, __reg_14_1, __reg_14_2, __reg_14_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __CALC14(__reg_14_1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h - 15, __reg_14_2, __reg_14_0, __reg_14_1); __h++;
} }

// --- Same streaming kernel, specialized to fuse 14 time steps (used when fewer than 15 steps remain in the time loop).
__global__ void kernel0_14(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2;
double __reg_1_0; double __reg_1_1; double __reg_1_2;
double __reg_2_0; double __reg_2_1; double __reg_2_2;
double __reg_3_0; double __reg_3_1; double __reg_3_2;
double __reg_4_0; double __reg_4_1; double __reg_4_2;
double __reg_5_0; double __reg_5_1; double __reg_5_2;
double __reg_6_0; double __reg_6_1; double __reg_6_2;
double __reg_7_0; double __reg_7_1; double __reg_7_2;
double __reg_8_0; double __reg_8_1; double __reg_8_2;
double __reg_9_0; double __reg_9_1; double __reg_9_2;
double __reg_10_0; double __reg_10_1; double __reg_10_2;
double __reg_11_0; double __reg_11_1; double __reg_11_2;
double __reg_12_0; double __reg_12_1; double __reg_12_2;
double __reg_13_0; double __reg_13_1; double __reg_13_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __storeValid = __writeValid14;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - \
(__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC13(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid13) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_13_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_13_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_13_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_13_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_13_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_13_0, __reg_4_1, __reg_4_2); 
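// --- __CALCEXPR above is one stencil application: the centre value plus the
// --- reciprocal square root of 0.0001f plus the four squared neighbour
// --- differences (vertical neighbours come from the register pipeline,
// --- horizontal ones from the shared-memory row __b_sb, double-buffered via
// --- __DB_SWITCH). In this first-block prologue (__c1Id == 0), loading row h
// --- activates stage h - 1, and __reg_13_0 keeps the untouched top boundary
// --- row 0 that seeds each stage's first compute.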
__LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_13_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_13_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_13_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_13_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_13_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_13_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_13_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); 
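// --- Row 14 completes the warm-up: stage 13 fires next, and from row 15 onward
// --- every loaded row retires one output row via __STORE(h - 14, ...).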
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_13_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(1, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(2, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(3, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); 
__CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(4, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(5, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(6, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(7, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(8, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(9, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(10, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(11, __reg_13_1, __reg_13_2, __reg_13_0); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(12, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); 
__CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(13, __reg_13_0, __reg_13_1, __reg_13_2); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, 
__reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, 
__reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 27); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
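// --- Interior blocks (else branch) have no boundary row to reuse: stage k only
// --- comes alive once 2k rows are buffered, and the block's first store is
// --- __STORE(14, ...); the rows before it lie in the neighbouring tile's overlap.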
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 28); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(14, __reg_13_1, __reg_13_2, __reg_13_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); 
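// --- End-of-column drain (__h + 0 case): the bottom boundary row (__reg_0_1) is
// --- replayed through the remaining stages, retiring one buffered output row per
// --- stage until the pipeline is empty.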
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_0_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, 
__reg_13_1, __reg_13_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); 
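// --- Same drain pattern, one row deeper (__h + 1 case): __reg_0_2 now plays the
// --- bottom-boundary role.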
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, __reg_13_2, __reg_13_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_0_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 13, __reg_13_0, __reg_13_1, __reg_13_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 12, __reg_13_1, __reg_13_2, __reg_13_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, 
__reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 11, __reg_13_2, __reg_13_0, __reg_13_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 10, __reg_13_0, __reg_13_1, __reg_13_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 9, __reg_13_1, __reg_13_2, __reg_13_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 8, __reg_13_2, __reg_13_0, __reg_13_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 7, __reg_13_0, __reg_13_1, __reg_13_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 6, __reg_13_1, __reg_13_2, __reg_13_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 5, __reg_13_2, __reg_13_0, __reg_13_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 4, __reg_13_0, __reg_13_1, __reg_13_2); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 3, __reg_13_1, 
__reg_13_2, __reg_13_0); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 2, __reg_13_2, __reg_13_0, __reg_13_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_0_0); __STORE(__h - 1, __reg_13_0, __reg_13_1, __reg_13_2); __STORE(__h + 0, __reg_13_1, __reg_13_2, __reg_0_0); } } else { for (__h = 29; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC13(__reg_13_1, __reg_12_0, __reg_12_1, __reg_12_2); 
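// --- Interior blocks run the 3x-unrolled steady loop up to __side1LenOl - 3 and
// --- peel the remaining iterations behind early-return guards, so a thread block
// --- exits as soon as __h reaches __side1LenOl.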
__STORE(__h - 14, __reg_13_2, __reg_13_0, __reg_13_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __CALC13(__reg_13_2, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h - 14, __reg_13_0, __reg_13_1, __reg_13_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __CALC13(__reg_13_0, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 14, __reg_13_1, __reg_13_2, __reg_13_0); __h++; } } __global__ void kernel0_13(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double 
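// --- kernel0_13: the same register pipeline with 13 fused time steps. The
// --- shallower pipeline needs one less halo column per side, so the usable tile
// --- width grows by two (__side2Len: 484 -> 486).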
__reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __storeValid = __writeValid13; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC12(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid12) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_12_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_12_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_12_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_12_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_12_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_12_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_12_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); 
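/* --- Annotation (inferred from the macro definitions above, not part of the
   generated output): kernel0_13 fuses 13 time steps per launch. __CALCk
   advances one row to time depth k entirely in registers, and __STORE applies
   the final step and commits row __c1 = __c1Pad2 - __halo1 + h to the other
   global plane, so the 12 intermediate time levels never touch global memory.
   This __c1Id == 0 branch is the top tile: row 0 is kept in __reg_12_0 and
   doubles as every stage's upper halo, so a new stage comes online with each
   loaded row and the first commit, __STORE(1, ...), follows the load of
   row 14. */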
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_12_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_12_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_12_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_12_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_12_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_12_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); 
__CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(1, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(2, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(3, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(4, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(5, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); 
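/* --- Annotation: each pipeline stage k keeps three consecutive rows in the
   rotating register triple __reg_k_0/1/2. The generator rotates the suffix
   rather than copying values between registers, which is why successive
   unrolled statements permute their argument order. */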
__CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(6, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(7, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(8, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(9, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(10, __reg_12_0, __reg_12_1, __reg_12_2); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(11, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(12, __reg_12_2, __reg_12_0, __reg_12_1); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); 
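/* --- Annotation: this else-branch serves interior tiles (__c1Id != 0). They
   redundantly recompute the 13-row overlap shared with the tile above
   (overlapped tiling), so rows 0..12 are pipeline fuel only and the first
   committed row is __STORE(13, ...). Further down, the last tile
   (__c1Id == __side1Num - 1) drains the pipeline at the bottom boundary by
   replaying the final loaded row as the lower halo of every remaining
   stage. */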
__LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 25); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 26); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(13, __reg_12_0, __reg_12_1, __reg_12_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, 
__reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_0_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, 
__reg_12_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_0_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 12, __reg_12_2, __reg_12_0, __reg_12_1); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 11, __reg_12_0, __reg_12_1, __reg_12_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 10, __reg_12_1, __reg_12_2, __reg_12_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 9, __reg_12_2, __reg_12_0, __reg_12_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 8, __reg_12_0, __reg_12_1, __reg_12_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 7, __reg_12_1, __reg_12_2, __reg_12_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 6, __reg_12_2, __reg_12_0, __reg_12_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 5, __reg_12_0, __reg_12_1, __reg_12_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, 
__reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 4, __reg_12_1, __reg_12_2, __reg_12_0); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 3, __reg_12_2, __reg_12_0, __reg_12_1); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 2, __reg_12_0, __reg_12_1, __reg_12_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_0_1); __STORE(__h - 1, __reg_12_1, __reg_12_2, __reg_12_0); __STORE(__h + 0, __reg_12_2, __reg_12_0, __reg_0_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, 
__reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __CALC12(__reg_12_0, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h - 13, __reg_12_1, __reg_12_2, __reg_12_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __CALC12(__reg_12_1, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 13, __reg_12_2, __reg_12_0, __reg_12_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __CALC12(__reg_12_2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 13, __reg_12_0, __reg_12_1, __reg_12_2); __h++; } } __global__ void kernel0_12(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; 
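/* --- Annotation: kernel0_12 repeats the same register-pipeline scheme with 12
   fused time steps; presumably the host code selects it to consume a timestep
   remainder when the total step count is not a multiple of 13. A 512-thread
   block keeps __side2Len = 488 useful columns, losing one halo column per
   fused step on each side (488 + 2 * 12 = 512), versus 486 + 2 * 13 = 512 for
   kernel0_13. */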
double __reg_11_0; double __reg_11_1; double __reg_11_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __storeValid = __writeValid12; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC11(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid11) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_11_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_11_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_11_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_11_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_11_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_11_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_11_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); 
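/* --- Annotation: __b_sb is a double-buffered shared-memory row. Each
   __CALCSETUP flips the buffer (__DB_SWITCH), publishes the stage's middle
   row, and issues a single __syncthreads(), so the lateral neighbours read
   through __SBREF belong to the current stage while the previous stage's
   reads, which targeted the other buffer, cannot race with this stage's
   writes. */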
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_11_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_11_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_11_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_11_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_11_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(1, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); 
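// --- Steady state: from here each newly loaded row retires one output row. __STORE(h, ...) applies the final (12th) fused step via __CALCEXPR and writes row __c1Pad2 - __halo1 + h into the (c0 + 1) % 2 half of A, i.e. A double-buffers two time levels.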
__CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(2, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(3, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(4, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(5, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(6, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(7, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
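// --- Columns (c2) map to threads, so east/west neighbours live in other threads: each __CALCk flips __b_sb to the other half of __b_sb_double, publishes its centre register, and __syncthreads() before __CALCEXPR reads __SBREF(__b_sb, +/-1); the double buffer keeps one stage's reads from racing the next stage's writes.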
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(8, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(9, __reg_11_2, __reg_11_0, __reg_11_1); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(10, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(11, __reg_11_1, __reg_11_2, __reg_11_0); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); 
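// --- Interior c1 tiles (else branch): the pipeline is primed from the tile's own overlap rows instead of pinning row 0, and the first output is __STORE(12, ...) since each of the 12 fused steps consumes one halo row at the top of the tile.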
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 23); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); 
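// --- The c2 tiles overlap as well: a block loads __side2LenOl = __side2Len + 2 * __halo2 * __side0Len columns, and each __writeValidk trims __halo2 columns per fused step, so only the central __side2Len columns survive to __storeValid.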
__CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 24); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(12, __reg_11_2, __reg_11_0, __reg_11_1); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, 
__reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_0_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, 
__reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, 
__reg_0_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_0_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 11, __reg_11_1, __reg_11_2, __reg_11_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 10, __reg_11_2, __reg_11_0, __reg_11_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 9, __reg_11_0, __reg_11_1, __reg_11_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 8, __reg_11_1, __reg_11_2, __reg_11_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 7, __reg_11_2, __reg_11_0, __reg_11_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 6, __reg_11_0, __reg_11_1, __reg_11_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 5, __reg_11_1, __reg_11_2, __reg_11_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 4, __reg_11_2, __reg_11_0, __reg_11_1); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 3, __reg_11_0, __reg_11_1, __reg_11_2); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 2, __reg_11_1, __reg_11_2, __reg_11_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_0_2); __STORE(__h - 1, __reg_11_2, __reg_11_0, __reg_11_1); __STORE(__h + 0, __reg_11_0, __reg_11_1, __reg_0_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, 
__reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __CALC11(__reg_11_2, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 12, __reg_11_0, __reg_11_1, __reg_11_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __CALC11(__reg_11_0, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 12, __reg_11_1, __reg_11_2, __reg_11_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __CALC11(__reg_11_1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h - 12, __reg_11_2, __reg_11_0, __reg_11_1); __h++; } } __global__ void kernel0_11(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % 
__side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __storeValid = __writeValid11; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC10(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid10) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_10_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_10_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_10_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_10_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_10_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_10_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_10_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
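// --- kernel0_11 repeats the same scheme with 11 fused time steps (__side0Len = 11, __storeValid == __writeValid11) on a 490 + 2 * 11 = 512-column tile; presumably the host-side driver falls back to it when the remaining time-step count is not covered by the 12-step kernel.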
__CALC7(__reg_7_1, __reg_10_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_10_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_10_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_10_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(1, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(2, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(3, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
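// --- The __reg_k_{0,1,2} triples are rotating three-row windows: the "oldest" slot shifts by one on every __LOAD step (hence the cyclic argument order), so the pipeline advances without register-to-register copies.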
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(4, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(5, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(6, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(7, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(8, __reg_10_1, __reg_10_2, __reg_10_0); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(9, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, 
__reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(10, __reg_10_0, __reg_10_1, __reg_10_2); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 21); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, 
__reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 22); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(11, __reg_10_1, __reg_10_2, __reg_10_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, 
__reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_0_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, 
__reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_0_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, 
__reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 10, __reg_10_0, __reg_10_1, __reg_10_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 9, __reg_10_1, __reg_10_2, __reg_10_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 8, __reg_10_2, __reg_10_0, __reg_10_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 7, __reg_10_0, __reg_10_1, __reg_10_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 6, __reg_10_1, __reg_10_2, __reg_10_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 5, __reg_10_2, __reg_10_0, __reg_10_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 4, __reg_10_0, __reg_10_1, __reg_10_2); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 3, __reg_10_1, __reg_10_2, __reg_10_0); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 2, __reg_10_2, __reg_10_0, __reg_10_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_0_0); __STORE(__h - 1, __reg_10_0, __reg_10_1, __reg_10_2); __STORE(__h + 0, __reg_10_1, __reg_10_2, __reg_0_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, 
__reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, __reg_10_2, __reg_10_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC10(__reg_10_1, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 11, __reg_10_2, __reg_10_0, __reg_10_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __CALC10(__reg_10_2, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h - 11, __reg_10_0, __reg_10_1, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __CALC10(__reg_10_0, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 11, __reg_10_1, 
__reg_10_2, __reg_10_0); __h++; } } __global__ void kernel0_10(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + 
__c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, 
__reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); 
__CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); 
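// --- Interior tile (__c1Id != 0): the warm-up below streams plain loads through the
// --- register pipeline with no top-boundary replication; the first __STORE is issued
// --- only once every stage holds a valid plane.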
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, 
__reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); 
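// --- Bottom tile, final plane loaded: the pipeline is now drained stage by stage,
// --- reusing the last plane (__reg_0_0) as the bottom-halo input of each remaining
// --- __CALC while the trailing __STOREs flush the results.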
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); 
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; 
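// --- The third unrolled step below completes one full rotation of the __reg_*_0/1/2
// --- register cycle, which is why the steady-state loop is unrolled 3x. Each
// --- __CALCSETUP flips the shared-memory double buffer, and 3 x 10 flips per trip
// --- leave its parity unchanged, so no trailing __DB_SWITCH is needed here.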
__LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - 
__OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } }

/****************************************************/
/* AN5D-STYLE GENERATED KERNEL - 8 FUSED TIME STEPS */
/****************************************************/
// --- Machine-generated temporal-blocking kernel (the AN5D_TYPE naming suggests an AN5D-style
// --- generator): 8 time steps are fused per sweep (__side0Len = 8). Dimension c1 streams through
// --- the per-thread registers __reg_k_*, dimension c2 lives in the double-buffered shared array
// --- __b_sb_double, and each __writeValidN predicate trims one halo cell per fused step, leaving
// --- __side2Len = 512 - 2 * 8 = 496 output columns per block.
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2;
__shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1; AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) *
dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, 
__reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); 
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); 
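// --- Interior blocks (__c1Id != 0) run the same warm-up without the domain-boundary shortcut;
// --- the h = 16 load below completes the 8-deep pipeline, and __STORE(8, ...) then writes the
// --- first row this block owns.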
__LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + 
__halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, 
__reg_7_2, __reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } }

/****************************************************/
/* AN5D-STYLE GENERATED KERNEL - 7 FUSED TIME STEPS */
/****************************************************/
// --- Same register-streaming scheme as kernel0_8, specialized to 7 fused time steps
// --- (__side0Len = 7, __side2Len = 512 - 2 * 7 = 498).
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0) {
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2;
__shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1; AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) *
((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, 
__reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); 
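// --- Drain: no rows remain to load, so the last loaded row (__reg_0_2) is re-fed to the
// --- remaining __CALC stages until every in-flight time step has been flushed by __STORE.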
__CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } }

/****************************************************************************/
/* GENERATED DEVICE KERNELS - kernel0_N FUSES N TIME STEPS PER LAUNCH       */
/* (register-pipelined along c1, double-buffered shared memory along c2)   */
/****************************************************************************/
// --- Each __CALCEXPR application computes, for a centre value b with
// --- c1-neighbours a (above) and c (below) and c2-neighbours read from the
// --- shared-memory buffer __b_sb:
// ---     out = b + 1 / sqrt(1e-4 + (b-a)^2 + (b-c)^2 + (b-sb[+1])^2 + (b-sb[-1])^2)
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE
__side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
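// --- Interior tile (__c1Id != 0) of the 5-step kernel: no boundary clamping is
// --- applied in this branch; rows 0..10 are streamed in to prime the four
// --- register stages (__CALC1..__CALC4) before the first __STORE lands at row 5.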
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
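// --- Tail case of the 3-step kernel with two rows left in the tile: both rows
// --- are loaded, the outstanding __CALC2 results are flushed, and the final
// --- stores are clamped with the last loaded row (__reg_0_2).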
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_1_0; double __reg_1_1; double __reg_1_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 
= __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0_0; double __reg_0_1; double __reg_0_2; __shared__ double __b_sb_double[__blockSize * 2]; double *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
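The kernels above come out of the AN5D stencil compiler: each thread streams down the grid holding a three-row window in rotating registers (the __reg_* variables), and rows are exchanged with lateral neighbours through a double-buffered shared-memory line — __DB_SWITCH flips __b_sb between the two halves of __b_sb_double so that staging the next row never races with reads of the current one, removing the need for a second barrier per row. A minimal hand-written CUDA sketch of that double-buffering idea (hypothetical names, clamped boundaries, not AN5D output) is:

#define TILE 256

// 3-point horizontal stencil, streaming an H x W grid row by row.
// Compile with nvcc; each block covers TILE-2 interior columns plus a
// one-element halo on each side.
__global__ void rowStream3pt(const double *in, double *out, int H, int W)
{
    __shared__ double line[2][TILE];                        // two row buffers
    int x  = blockIdx.x * (TILE - 2) + (int)threadIdx.x - 1; // 1-point halo
    int xc = min(max(x, 0), W - 1);                          // clamped load index
    int buf = 0;
    for (int y = 0; y < H; ++y) {
        line[buf][threadIdx.x] = in[y * W + xc];             // stage current row
        __syncthreads();                                     // row fully visible
        if (threadIdx.x > 0 && threadIdx.x < TILE - 1 && x >= 0 && x < W)
            out[y * W + x] = (line[buf][threadIdx.x - 1]
                            + line[buf][threadIdx.x]
                            + line[buf][threadIdx.x + 1]) / 3.0;
        buf ^= 1;  // flip buffers: the next row is staged in the other half,
                   // so no second barrier is needed between read and write
    }
}
// launch (sketch): rowStream3pt<<<(W + TILE - 3) / (TILE - 2), TILE>>>(d_in, d_out, H, W);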
4a96129b3eb3fae53566903a7c624ab8b1362507.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define BLOCK_SIZE 512 typedef struct Data { double *a; double *b; double *c; } Data; __global__ void add( Data data, int vector_size ) { // Calculate the index in the vector for the thread using the internal variables int tid = blockIdx.x*blockDim.x + threadIdx.x; // This if statement is added in case we have more threads executing // Than number of elements in the vectors. How can this help? if (tid < vector_size){ // Compute the addition data.c[tid] = data.a[tid] + data.b[tid]; } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments (we only receive command + vector size) if (argc != 2) { // Tell the user how to run the program printf ("Usage: %s vector_size\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return 1; } // Set GPU Variables based on input arguments int vector_size = atoi(argv[1]); int grid_size = ((vector_size-1)/BLOCK_SIZE) + 1; // Set device that we will use for our cuda code // It will be 0, 1, 2 or 3 hipSetDevice(0); // Time Variables hipEvent_t start, stop; float time; hipEventCreate (&start); hipEventCreate (&stop); // CPU Struct Data data_cpu; // HERE data_cpu.a = new double [vector_size]; data_cpu.b = new double [vector_size]; data_cpu.c = new double [vector_size]; Data data_gpu_on_cpu; data_gpu_on_cpu.c = new double [vector_size]; // fill the arrays 'a' and 'b' on the CPU printf("Initializing input arrays.\n"); for (int i = 0; i < vector_size; i++) { data_cpu.a[i] = rand()*cos(i); data_cpu.b[i] = rand()*sin(i); data_cpu.c[i] = 0.0; } // allocate the memory on the GPU Data data_gpu; hipMalloc (&data_gpu.a, vector_size*sizeof(double)); hipMalloc (&data_gpu.b, vector_size*sizeof(double)); hipMalloc (&data_gpu.c, vector_size*sizeof(double)); // // CPU Calculation ////////////////// printf("Running sequential job.\n"); hipEventRecord(start,0); // Calculate C in the CPU for (int i = 0; i < vector_size; i++) { data_cpu.c[i] = data_cpu.a[i] + data_cpu.b[i]; } hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\tSequential Job Time: %.2f ms\n", time); // // GPU Calculation //////////////////////// printf("Running parallel job.\n"); hipEventRecord(start,0); // copy the input to the GPU hipMemcpy (data_gpu.a, data_cpu.a, vector_size*sizeof(double), hipMemcpyHostToDevice); hipMemcpy (data_gpu.b, data_cpu.b, vector_size*sizeof(double), hipMemcpyHostToDevice); // call the kernel hipLaunchKernelGGL(( add), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, data_gpu, vector_size); // copy the array 'c' back from the GPU to the CPU hipMemcpy (data_gpu_on_cpu.c, data_gpu.c, vector_size*sizeof(double), hipMemcpyDeviceToHost); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\tParallel Job Time: %.2f ms\n", time); // compare the results int error = 0; for (int i = 0; i < vector_size; i++) { if (data_cpu.c[i] != data_gpu_on_cpu.c[i]){ error = 1; printf( "Error starting element %d, %f != %f\n", i, data_gpu_on_cpu.c[i], data_cpu.c[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. 
No errors were found.\n"); } // free CPU data (these arrays were allocated with new[], so release them with delete[], not free) // HERE delete [] data_cpu.a; delete [] data_cpu.b; delete [] data_cpu.c; delete [] data_gpu_on_cpu.c; // free the memory allocated on the GPU hipFree (data_gpu.a); hipFree (data_gpu.b); hipFree (data_gpu.c); return 0; }
4a96129b3eb3fae53566903a7c624ab8b1362507.cu
#include <stdio.h> #include <stdlib.h> #define BLOCK_SIZE 512 typedef struct Data { double *a; double *b; double *c; } Data; __global__ void add( Data data, int vector_size ) { // Calculate the index in the vector for the thread using the internal variables int tid = blockIdx.x*blockDim.x + threadIdx.x; // This if statement is added in case we have more threads executing // Than number of elements in the vectors. How can this help? if (tid < vector_size){ // Compute the addition data.c[tid] = data.a[tid] + data.b[tid]; } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments (we only receive command + vector size) if (argc != 2) { // Tell the user how to run the program printf ("Usage: %s vector_size\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return 1; } // Set GPU Variables based on input arguments int vector_size = atoi(argv[1]); int grid_size = ((vector_size-1)/BLOCK_SIZE) + 1; // Set device that we will use for our cuda code // It will be 0, 1, 2 or 3 cudaSetDevice(0); // Time Variables cudaEvent_t start, stop; float time; cudaEventCreate (&start); cudaEventCreate (&stop); // CPU Struct Data data_cpu; // HERE data_cpu.a = new double [vector_size]; data_cpu.b = new double [vector_size]; data_cpu.c = new double [vector_size]; Data data_gpu_on_cpu; data_gpu_on_cpu.c = new double [vector_size]; // fill the arrays 'a' and 'b' on the CPU printf("Initializing input arrays.\n"); for (int i = 0; i < vector_size; i++) { data_cpu.a[i] = rand()*cos(i); data_cpu.b[i] = rand()*sin(i); data_cpu.c[i] = 0.0; } // allocate the memory on the GPU Data data_gpu; cudaMalloc (&data_gpu.a, vector_size*sizeof(double)); cudaMalloc (&data_gpu.b, vector_size*sizeof(double)); cudaMalloc (&data_gpu.c, vector_size*sizeof(double)); // // CPU Calculation ////////////////// printf("Running sequential job.\n"); cudaEventRecord(start,0); // Calculate C in the CPU for (int i = 0; i < vector_size; i++) { data_cpu.c[i] = data_cpu.a[i] + data_cpu.b[i]; } cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\tSequential Job Time: %.2f ms\n", time); // // GPU Calculation //////////////////////// printf("Running parallel job.\n"); cudaEventRecord(start,0); // copy the input to the GPU cudaMemcpy (data_gpu.a, data_cpu.a, vector_size*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy (data_gpu.b, data_cpu.b, vector_size*sizeof(double), cudaMemcpyHostToDevice); // call the kernel add<<<grid_size, BLOCK_SIZE>>>(data_gpu, vector_size); // copy the array 'c' back from the GPU to the CPU cudaMemcpy (data_gpu_on_cpu.c, data_gpu.c, vector_size*sizeof(double), cudaMemcpyDeviceToHost); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\tParallel Job Time: %.2f ms\n", time); // compare the results int error = 0; for (int i = 0; i < vector_size; i++) { if (data_cpu.c[i] != data_gpu_on_cpu.c[i]){ error = 1; printf( "Error starting element %d, %f != %f\n", i, data_gpu_on_cpu.c[i], data_cpu.c[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // free CPU data (these arrays were allocated with new[], so release them with delete[], not free) // HERE delete [] data_cpu.a; delete [] data_cpu.b; delete [] data_cpu.c; delete [] data_gpu_on_cpu.c; // free the memory allocated on the GPU cudaFree (data_gpu.a); cudaFree (data_gpu.b); cudaFree (data_gpu.c); return 0; }
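Two remarks on the pair of files above. The exact comparison data_cpu.c[i] != data_gpu_on_cpu.c[i] is legitimate here because both sides perform a single IEEE-754 addition on bit-identical operands, so the results must match bit for bit. Less defensible is that no CUDA/HIP API return code is ever checked; a minimal checking macro for the CUDA variant (a sketch, not part of the original exercise) could be:

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Wrap every runtime call so a failure reports file/line and aborts.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage (sketch):
//   CUDA_CHECK(cudaMalloc(&data_gpu.a, vector_size * sizeof(double)));
//   add<<<grid_size, BLOCK_SIZE>>>(data_gpu, vector_size);
//   CUDA_CHECK(cudaGetLastError());   // catches kernel launch errors
//   CUDA_CHECK(cudaMemcpy(data_gpu_on_cpu.c, data_gpu.c,
//                         vector_size * sizeof(double),
//                         cudaMemcpyDeviceToHost));  // also syncs the device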
2c0c12380fc190536a6140299eaef1e15a37441f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/attention_random_input_conv_layer.hpp" #include <stdio.h> #include <stdlib.h> #include <time.h> namespace caffe { template <typename Dtype> __global__ void inspect_random_kernel(const int n, Dtype* a) { CUDA_KERNEL_LOOP(index, n) { printf("%f\n",a[index] ); } } template <typename Dtype> __global__ void add_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b; } } template <typename Dtype> __global__ void mul_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b; } } template <typename Dtype> void AttentionRandomInputConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_attention = bottom[1]->gpu_data(); const Dtype* random = bottom[2]->mutable_gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); //printf("forward attention conv layer 0\n"); //srand((unsigned int)time(NULL)); for (int n = 0; n < this->num_; ++n) { const Dtype* input = bottom_data + n * this->bottom_dim_; const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_; const Dtype* random_input = random + n * kernel_dim_ * height_ * width_; // printf("forward attention conv layer 1\n"); conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); // printf("forward attention conv layer 2\n"); caffe_gpu_mul( kernel_dim_ * height_ * width_,col_buffer_.gpu_data(),att_input, attention_col_buffer_.mutable_gpu_data()); // printf("forward attention conv layer 3\n"); //printf("lilac!!\n"); //int number = kernel_dim_ * height_ * width_; //caffe_gpu_rng_uniform(kernel_dim_ * height_ * width_,mini, maxi,random_input); //printf("lilac!!\n"); //inspect_random_kernel<<<CAFFE_GET_BLOCKS(10), CAFFE_CUDA_NUM_THREADS>>>(10,random_input); //printf("\n"); //mul_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, maxi - mini, random_input); //add_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, mini, random_input); caffe_gpu_mul(kernel_dim_ * height_ * width_ ,attention_col_buffer_.gpu_data(), random_input, attention_col_buffer_.mutable_gpu_data()); const Dtype* att_col_buff = attention_col_buffer_.gpu_data(); // printf("forward attention conv layer 4\n"); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /* N' */, conv_out_spatial_dim_/* H' * W' */ , kernel_dim_/* C * h * w */, (Dtype)1., weight /* C' * C * h * w */, att_col_buff /* C * h * w * H' * W' */, (Dtype)0., top_data + n * this->top_dim_); // C' * H' * W' // printf("forward attention conv layer 5\n"); if (this->bias_term_) { // printf("forward attention conv layer 5.5\n"); const Dtype* bias = this->blobs_[1]->gpu_data(); // printf("forward attention conv layer 6\n"); this->forward_gpu_bias(top_data + n * this->top_dim_, bias); // printf("forward attention conv layer 7\n"); } } } template <typename Dtype> void AttentionRandomInputConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* 
weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_attention = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom_att_diff = bottom[1]->mutable_gpu_diff(); const Dtype* random = bottom[2]->gpu_data(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[0]) { for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_; const Dtype* random_input = random + n * kernel_dim_ * height_ * width_; if (this->param_propagate_down_[0]) { const Dtype* input = bottom_data + n * this->bottom_dim_; conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); const Dtype* col_buff = col_buffer_.gpu_data(); caffe_gpu_mul(kernel_dim_ * height_ * width_,col_buff,att_input, attention_col_buffer_.mutable_gpu_data()); caffe_gpu_mul( kernel_dim_ * height_ * width_,random_input,attention_col_buffer_.gpu_data(), attention_col_buffer_.mutable_gpu_data()); const Dtype* att_col_buff = attention_col_buffer_.gpu_data(); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ /* N' */, kernel_dim_ /* C * h * w */ , conv_out_spatial_dim_/* H' * W' */, (Dtype)1., top_diff + n * this->top_dim_ , att_col_buff, (Dtype)1., weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[0]) { Dtype * att_col_diff_buff = attention_col_diff_buff_.mutable_gpu_data(); caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_, (Dtype)1., weight , top_diff + n * this->top_dim_ , (Dtype)0., att_col_diff_buff ); caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,random_input, attention_col_diff_buff_.mutable_gpu_data()); caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,att_input, col_diff_buffer_.mutable_gpu_data()); conv_col2im_gpu(col_diff_buffer_.gpu_data(), bottom_diff + n * this->bottom_dim_); caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,col_buffer_.gpu_data(),bottom_att_diff + n * kernel_dim_ * height_ * width_); } } } } INSTANTIATE_LAYER_GPU_FUNCS(AttentionRandomInputConvolutionLayer); } // namespace caffe
2c0c12380fc190536a6140299eaef1e15a37441f.cu
#include <vector> #include "caffe/layers/attention_random_input_conv_layer.hpp" #include <stdio.h> #include <stdlib.h> #include <time.h> namespace caffe { template <typename Dtype> __global__ void inspect_random_kernel(const int n, Dtype* a) { CUDA_KERNEL_LOOP(index, n) { printf("%f\n",a[index] ); } } template <typename Dtype> __global__ void add_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b; } } template <typename Dtype> __global__ void mul_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b; } } template <typename Dtype> void AttentionRandomInputConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_attention = bottom[1]->gpu_data(); const Dtype* random = bottom[2]->mutable_gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); //printf("forward attention conv layer 0\n"); //srand((unsigned int)time(NULL)); for (int n = 0; n < this->num_; ++n) { const Dtype* input = bottom_data + n * this->bottom_dim_; const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_; const Dtype* random_input = random + n * kernel_dim_ * height_ * width_; // printf("forward attention conv layer 1\n"); conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); // printf("forward attention conv layer 2\n"); caffe_gpu_mul( kernel_dim_ * height_ * width_,col_buffer_.gpu_data(),att_input, attention_col_buffer_.mutable_gpu_data()); // printf("forward attention conv layer 3\n"); //printf("lilac!!\n"); //int number = kernel_dim_ * height_ * width_; //caffe_gpu_rng_uniform(kernel_dim_ * height_ * width_,mini, maxi,random_input); //printf("lilac!!\n"); //inspect_random_kernel<<<CAFFE_GET_BLOCKS(10), CAFFE_CUDA_NUM_THREADS>>>(10,random_input); //printf("\n"); //mul_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, maxi - mini, random_input); //add_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, mini, random_input); caffe_gpu_mul(kernel_dim_ * height_ * width_ ,attention_col_buffer_.gpu_data(), random_input, attention_col_buffer_.mutable_gpu_data()); const Dtype* att_col_buff = attention_col_buffer_.gpu_data(); // printf("forward attention conv layer 4\n"); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /* N' */, conv_out_spatial_dim_/* H' * W' */ , kernel_dim_/* C * h * w */, (Dtype)1., weight /* C' * C * h * w */, att_col_buff /* C * h * w * H' * W' */, (Dtype)0., top_data + n * this->top_dim_); // C' * H' * W' // printf("forward attention conv layer 5\n"); if (this->bias_term_) { // printf("forward attention conv layer 5.5\n"); const Dtype* bias = this->blobs_[1]->gpu_data(); // printf("forward attention conv layer 6\n"); this->forward_gpu_bias(top_data + n * this->top_dim_, bias); // printf("forward attention conv layer 7\n"); } } } template <typename Dtype> void AttentionRandomInputConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* top_diff = 
top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_attention = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom_att_diff = bottom[1]->mutable_gpu_diff(); const Dtype* random = bottom[2]->gpu_data(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); } } if (this->param_propagate_down_[0] || propagate_down[0]) { for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_; const Dtype* random_input = random + n * kernel_dim_ * height_ * width_; if (this->param_propagate_down_[0]) { const Dtype* input = bottom_data + n * this->bottom_dim_; conv_im2col_gpu(input, col_buffer_.mutable_gpu_data()); const Dtype* col_buff = col_buffer_.gpu_data(); caffe_gpu_mul(kernel_dim_ * height_ * width_,col_buff,att_input, attention_col_buffer_.mutable_gpu_data()); caffe_gpu_mul( kernel_dim_ * height_ * width_,random_input,attention_col_buffer_.gpu_data(), attention_col_buffer_.mutable_gpu_data()); const Dtype* att_col_buff = attention_col_buffer_.gpu_data(); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ /* N' */, kernel_dim_ /* C * h * w */ , conv_out_spatial_dim_/* H' * W' */, (Dtype)1., top_diff + n * this->top_dim_ , att_col_buff, (Dtype)1., weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[0]) { Dtype * att_col_diff_buff = attention_col_diff_buff_.mutable_gpu_data(); caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_, (Dtype)1., weight , top_diff + n * this->top_dim_ , (Dtype)0., att_col_diff_buff ); caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,random_input, attention_col_diff_buff_.mutable_gpu_data()); caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,att_input, col_diff_buffer_.mutable_gpu_data()); conv_col2im_gpu(col_diff_buffer_.gpu_data(), bottom_diff + n * this->bottom_dim_); caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,col_buffer_.gpu_data(),bottom_att_diff + n * kernel_dim_ * height_ * width_); } } } } INSTANTIATE_LAYER_GPU_FUNCS(AttentionRandomInputConvolutionLayer); } // namespace caffe
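Stripped of the buffering details, the forward pass of this layer is an im2col convolution with two element-wise masks folded into the column buffer: col = im2col(x), then col = col .* att .* rnd, then y = W * col — exactly the caffe_gpu_mul / caffe_gpu_gemm sequence above. A tiny CPU reference of that masked-GEMM step (hypothetical names, single sample, row-major buffers) is:

#include <vector>

// y[co][p] = sum_k W[co][k] * (col[k][p] * att[k][p] * rnd[k][p])
// where col is the im2col expansion: K = C*h*w rows, P = H'*W' columns.
void masked_conv_forward(const std::vector<double>& W,    // CO x K weights
                         const std::vector<double>& col,  // K  x P im2col buffer
                         const std::vector<double>& att,  // K  x P attention mask
                         const std::vector<double>& rnd,  // K  x P random mask
                         std::vector<double>& y,          // CO x P output
                         int CO, int K, int P)
{
    for (int co = 0; co < CO; ++co)
        for (int p = 0; p < P; ++p) {
            double acc = 0.0;
            for (int k = 0; k < K; ++k)
                acc += W[co * K + k] * col[k * P + p]
                     * att[k * P + p] * rnd[k * P + p];
            y[co * P + p] = acc;
        }
}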
fe31d67c3af3db49f74bd275cab5b75ca0e7dfae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define MAX_BUF 100000000 typedef unsigned int UINT; UINT buffer[MAX_BUF]; // All kernel functions must use this name; the parameter list can be customized __global__ void kernel() { } UINT ReadFile(const char *szFile, UINT data[]) { UINT len; FILE *fp; fp = fopen(szFile, "rb"); fread(&len, sizeof(UINT), 1, fp); if (len > MAX_BUF) { fclose(fp); return 0; } fread(data, sizeof(UINT), len, fp); fclose(fp); return len; } void WriteFile(const char *szFile, UINT data[], UINT len) { FILE *fp; if (len > MAX_BUF) return; fp = fopen(szFile, "wb"); fwrite(&len, sizeof(UINT), 1, fp); fwrite(data, sizeof(UINT), len, fp); fclose(fp); } int main(int argc, char *argv[]) { UINT length; if (argc != 2) return 1; length = ReadFile(argv[1], buffer); //sorting code //.... WriteFile("output.bin", buffer, length); return 0; }
fe31d67c3af3db49f74bd275cab5b75ca0e7dfae.cu
#include <stdio.h> #define MAX_BUF 100000000 typedef unsigned int UINT; UINT buffer[MAX_BUF]; // All kernel functions must use this name; the parameter list can be customized __global__ void kernel() { } UINT ReadFile(const char *szFile, UINT data[]) { UINT len; FILE *fp; fp = fopen(szFile, "rb"); fread(&len, sizeof(UINT), 1, fp); if (len > MAX_BUF) { fclose(fp); return 0; } fread(data, sizeof(UINT), len, fp); fclose(fp); return len; } void WriteFile(const char *szFile, UINT data[], UINT len) { FILE *fp; if (len > MAX_BUF) return; fp = fopen(szFile, "wb"); fwrite(&len, sizeof(UINT), 1, fp); fwrite(data, sizeof(UINT), len, fp); fclose(fp); } int main(int argc, char *argv[]) { UINT length; if (argc != 2) return 1; length = ReadFile(argv[1], buffer); //sorting code //.... WriteFile("output.bin", buffer, length); return 0; }
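Both copies of this skeleton exchange data through the same trivial binary layout: a leading 32-bit element count followed by that many 32-bit unsigned integers. A small host-side generator for test inputs in that format (a sketch; the output file name and the use of rand() are choices of this example, not part of the assignment) is:

#include <stdio.h>
#include <stdlib.h>

// Writes "input.bin" in the [len:uint32][len x uint32] layout expected by
// ReadFile() above. The element count is taken from argv[1].
int main(int argc, char *argv[])
{
    if (argc != 2) return 1;
    unsigned int len = (unsigned int)atoi(argv[1]);
    FILE *fp = fopen("input.bin", "wb");
    if (!fp) return 1;
    fwrite(&len, sizeof len, 1, fp);
    for (unsigned int i = 0; i < len; ++i) {
        unsigned int v = (unsigned int)rand();
        fwrite(&v, sizeof v, 1, fp);
    }
    fclose(fp);
    return 0;
}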
fc05182cbcb7141a68222e09e8d3024f1a73567c.hip
// !!! This is a file automatically generated by hipify!!! /*! \file main.cu \brief Driver or the entry point of the program */ #include <stdio.h> #include <iostream> #include <algorithm> #include <vector> #include <utility> #include<string> #include <cmath> #include <cassert> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_complex.h> #include "mmio.h" #include "double_complex.h" #include "Matrix.h" #include "Preconditioner.h" #include "Solver.h" #include "Read_Write_Data.h" //! main function(the driver) /*! Takes in command line arguments - matrix A and b files(b is optional) and produces final solution(after solving Ax = b) */ int main(int argc, char* argv[]) { CSR_Matrix* A = nullptr; Dense_Matrix* b = nullptr; Read_Matrix_A_and_vector_b(&A, &b, argc, argv); //reads A and b into cpu memory A->Allocate_Memory(LOCATION::GPU); b->Allocate_Memory(LOCATION::GPU); A->CopyMatrix_cpu_to_gpu(); b->CopyMatrix_cpu_to_gpu(); //A and b on cpu and gpu - ready Dense_Matrix x(b->GetRows(), 1, b->GetRows(), ORDER::COLUMN_MAJOR, CPU_EXISTENCE::EXISTENT, GPU_EXISTENCE::EXISTENT); for (int i = 0; i < x.GetRows(); i++) //Set initial guess { x.GetCPUValues()[i].x = 0; x.GetCPUValues()[i].y = 0; } x.CopyMatrix_cpu_to_gpu(); //Generate preconditioner - Richardson/Jacobi Preconditioner* precond = Generate_Preconditioner(PRECONDITIONER_TYPE::RICHARDSON, *A); if(precond == nullptr) { std::cout << "\nProblem in generating preconditioner."; exit(1); } Solver solver_obj; //Can set atol, rtol, max_iter etc... using solver_obj setter functions solver_obj.SetRtol(1e-04); solver_obj.SetAtol(1e-20); // solver_obj.SetMax_iter(100); int shadow_space_number = 4; solver_obj.PIDR_Solver(*A, *b, x, *precond, shadow_space_number); //Can change value of shadow space number here std::cout << "\n\n-----------------------------------Solver info:-----------------------------------------------------------"; std::cout << std::endl << "info: " << solver_obj.GetInfo(); std::cout << std::endl << "iter_resdidual:" << solver_obj.GetIter_residual(); std::cout << std::endl << "init_residual:" << solver_obj.GetInit_residual(); std::cout << std::endl << "final_residual:" << solver_obj.GetFinal_residual(); std::cout << std::endl << "runtime in milliseconds:" << solver_obj.GetRuntimeMilliseconds(); std::cout << std::endl << "max_iter: " << solver_obj.GetMax_iter(); std::cout << std::endl << "spmv_count:" << solver_obj.GetSpmv_count(); std::cout << std::endl << "num_iter:" << solver_obj.GetNum_iter(); std::cout << std::endl << "full_cycle:" << solver_obj.GetFull_cycle(); std::cout << std::endl << "rtol:" << solver_obj.GetRtol(); std::cout << std::endl << "atol:" << solver_obj.GetAtol(); if(solver_obj.GetInfo() == "SUCCESS") { std::cout << "\nSolution is written into file:" << " Vector_x.mtx"; std::cout << "\nResidual vector is written into file:" << " Vector_res.mtx"; Write_matrix("Vector_x.mtx" , x); Write_matrix("Vector_res.mtx", solver_obj.GetResvec()); } std::cout << "\nGenerating a log.txt file containing the iteratively computed residuals along with the timings at which they are computed." << std::endl; //Generates log FILE* fp; fp = fopen("log.txt" , "w"); for(int i=0;i<solver_obj.GetResvec().size();i++) { fprintf(fp,"\n Normr = %lg Timing = %lg",solver_obj.GetResvec()[i].normr,solver_obj.GetResvec()[i].timing); } //----------------------------------------------------------------------------------------------------------- delete A; delete b; delete precond; }
fc05182cbcb7141a68222e09e8d3024f1a73567c.cu
/*! \file main.cu \brief Driver or the entry point of the program */ #include <stdio.h> #include <iostream> #include <algorithm> #include <vector> #include <utility> #include<string> #include <cmath> #include <cassert> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuComplex.h> #include "mmio.h" #include "double_complex.h" #include "Matrix.h" #include "Preconditioner.h" #include "Solver.h" #include "Read_Write_Data.h" //! main function(the driver) /*! Takes in command line arguments - matrix A and b files(b is optional) and produces final solution(after solving Ax = b) */ int main(int argc, char* argv[]) { CSR_Matrix* A = nullptr; Dense_Matrix* b = nullptr; Read_Matrix_A_and_vector_b(&A, &b, argc, argv); //reads A and b into cpu memory A->Allocate_Memory(LOCATION::GPU); b->Allocate_Memory(LOCATION::GPU); A->CopyMatrix_cpu_to_gpu(); b->CopyMatrix_cpu_to_gpu(); //A and b on cpu and gpu - ready Dense_Matrix x(b->GetRows(), 1, b->GetRows(), ORDER::COLUMN_MAJOR, CPU_EXISTENCE::EXISTENT, GPU_EXISTENCE::EXISTENT); for (int i = 0; i < x.GetRows(); i++) //Set initial guess { x.GetCPUValues()[i].x = 0; x.GetCPUValues()[i].y = 0; } x.CopyMatrix_cpu_to_gpu(); //Generate preconditioner - Richardson/Jacobi Preconditioner* precond = Generate_Preconditioner(PRECONDITIONER_TYPE::RICHARDSON, *A); if(precond == nullptr) { std::cout << "\nProblem in generating preconditioner."; exit(1); } Solver solver_obj; //Can set atol, rtol, max_iter etc... using solver_obj setter functions solver_obj.SetRtol(1e-04); solver_obj.SetAtol(1e-20); // solver_obj.SetMax_iter(100); int shadow_space_number = 4; solver_obj.PIDR_Solver(*A, *b, x, *precond, shadow_space_number); //Can change value of shadow space number here std::cout << "\n\n-----------------------------------Solver info:-----------------------------------------------------------"; std::cout << std::endl << "info: " << solver_obj.GetInfo(); std::cout << std::endl << "iter_resdidual:" << solver_obj.GetIter_residual(); std::cout << std::endl << "init_residual:" << solver_obj.GetInit_residual(); std::cout << std::endl << "final_residual:" << solver_obj.GetFinal_residual(); std::cout << std::endl << "runtime in milliseconds:" << solver_obj.GetRuntimeMilliseconds(); std::cout << std::endl << "max_iter: " << solver_obj.GetMax_iter(); std::cout << std::endl << "spmv_count:" << solver_obj.GetSpmv_count(); std::cout << std::endl << "num_iter:" << solver_obj.GetNum_iter(); std::cout << std::endl << "full_cycle:" << solver_obj.GetFull_cycle(); std::cout << std::endl << "rtol:" << solver_obj.GetRtol(); std::cout << std::endl << "atol:" << solver_obj.GetAtol(); if(solver_obj.GetInfo() == "SUCCESS") { std::cout << "\nSolution is written into file:" << " Vector_x.mtx"; std::cout << "\nResidual vector is written into file:" << " Vector_res.mtx"; Write_matrix("Vector_x.mtx" , x); Write_matrix("Vector_res.mtx", solver_obj.GetResvec()); } std::cout << "\nGenerating a log.txt file containing the iteratively computed residuals along with the timings at which they are computed." << std::endl; //Generates log FILE* fp; fp = fopen("log.txt" , "w"); for(int i=0;i<solver_obj.GetResvec().size();i++) { fprintf(fp,"\n Normr = %lg Timing = %lg",solver_obj.GetResvec()[i].normr,solver_obj.GetResvec()[i].timing); } //----------------------------------------------------------------------------------------------------------- delete A; delete b; delete precond; }
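One small robustness nit shared by both copies of the driver above: log.txt is opened without checking the fopen result, and the stream is never fclose'd, so the log survives only because normal process exit flushes stdio. A hedged, self-contained variant of that logging step (ResidualRecord is a stand-in for whatever element type solver_obj.GetResvec() actually returns; only the normr and timing fields are taken from the source) is:

#include <cstdio>

// Hypothetical record mirroring the two fields the driver prints per iteration.
struct ResidualRecord { double normr; double timing; };

// Write the residual history, checking the handle and closing it explicitly.
bool write_log(const char *path, const ResidualRecord *recs, int n)
{
    FILE *fp = std::fopen(path, "w");
    if (!fp) return false;
    for (int i = 0; i < n; ++i)
        std::fprintf(fp, "\n Normr = %lg Timing = %lg",
                     recs[i].normr, recs[i].timing);
    std::fclose(fp);
    return true;
}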
886ae2144c25c7bb329921b2c19523c2e09d87b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype 
cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // LOG(INFO)<<"the entry in forward"; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); Dtype* bottom_diff; Dtype* dh; Dtype* dv; Dtype* Mdt; // int N_; int* defh; int* defv; int* defp; int* Ih; int* Iv; Dtype* defw; int Nparam = kernel_size_*kernel_size_; const Dtype* bottom_data_p; // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_DEF: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); dh_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); dv_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); Iv_.resize((channels_*bottom[0]->num()*height_*width_ )); //each point in inputmap Ih_.resize((channels_*bottom[0]->num()*height_*width_ )); top[0]->Reshape(bottom[0]->num(), channels_, 1, 1); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); for (int ch = 0; ch < channels_; ++ch) { if (defw[ch*4+0] < blobl_a_min_) defw[ch*4+0] = blobl_a_min_; if (defw[ch*4+2] < blobl_a_min_) defw[ch*4+2] = blobl_a_min_; // if (defw[ch*4+1] > 0.2) // defw[ch*4+1] = 0.2; // if (defw[ch*4+3] > 0.2) // defw[ch*4+3] = 0.2; /* for (int i=0; i< 4; ++i) if (fabs(defw[ch*4+i]) > 5) LOG(INFO) << "defw > 5" <<ch <<": " << defw[ch*4+i];*/ //if ( (ch==0) ) // LOG(INFO) << "defw" <<ch <<": " << defw[ch*4+0] << ", "<< defw[ch*4+1]<< ", " << defw[ch*4+2]<< ", " << defw[ch*4+3]; } // LOG(INFO) << "ff defw:" << defw[0*4+0] << ", "<< defw[0*4+1]<< ", " << defw[0*4+2]<< ", " << defw[0*4+3]; // bool Flag_print; N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF"<<bottom[0]->num() <<","<< channels_<<bottom[0]<<","<< pooled_height_; for (int n = 0; n < bottom[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; //Flag_print = false; }*/ // Dtype* data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) // LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); //LOG(INFO) << "defw" <<ch <<": " << defw[ch*4+0] << ", "<< defw[ch*4+1]<< ", " << defw[ch*4+2]<< ", " << defw[ch*4+3]; //LOG(INFO) << "ff defw:" << defw[ch*4+0] << ", "<< defw[ch*4+1]<< ", " << defw[ch*4+2]<< ", " << defw[ch*4+3]; // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"; //LOG(INFO) << defp[ch]<<","<<data_pointer[defp[ch]]<<","<<Mdt[defp[ch]]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << 
data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; top_data[n*channels_+ch] = Mdt[defp[ch]]; /* // facillitate BP using conv LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.3"; memset(data_pointer2, 0, sizeof(Dtype) * N_); LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4"; data_pointer2[defp[ch]] = top_data[n*channels_+ch];*/ //LOG(INFO) << "dh_dv" <<ch <<": "<<defh[ch] << ", "<< Ih[(n*channels_+ch)*N_+defp[ch]]<< ", " << defv[ch] << ", "<<Iv[(n*channels_+ch)*N_+defp[ch]]; if ((n==0)&&( top_data[n*channels_+ch] > 10000)) LOG(INFO) << "Forward_gpu dt2!!: vals:" << top_data[n*channels_+ch] <<" from " << data_pointer[defp[ch]]; // obtain the dh and dv of each part dh[n*channels_+ch] = defh[ch] - Ih[(n*channels_+ch)*N_+defp[ch]]; dv[n*channels_+ch] = defv[ch] - Iv[(n*channels_+ch)*N_+defp[ch]]; //CHECK_EQ(Flag_print, 1); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.5"; } } //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.6:" << top_data[0]; top[0]->mutable_gpu_data(); // bottom[0]->mutable_gpu_data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); // LOG(INFO)<<"the entry in DEF_ALL forward"; bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); dh_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); dv_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); Iv_.resize((channels_*bottom[0]->num()*height_*width_ )); //each point in inputmap Ih_.resize((channels_*bottom[0]->num()*height_*width_ )); //tmpIx_.resize((N_ )); // for one map, n = width * height //tmpIy_.resize((N_ )); //defh_.resize((channels_ )); // part location //defv_.resize((channels_ )); //defp_.resize((channels_ )); //Mdt_.resize((N_)); //tmpM_.resize(N_); top[0]->Reshape(bottom[0]->num(), pooled_height_ * pooled_width_, 1, 1); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); memset(dh, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); memset(dv, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); defw = this->blobs_[0]->mutable_cpu_data(); for (int ch = 0; ch < channels_; ++ch) { if (defw[ch*4+0] < blobl_a_min_) defw[ch*4+0] = blobl_a_min_; if (defw[ch*4+2] < blobl_a_min_) defw[ch*4+2] = blobl_a_min_; } N_ = width_ * height_; defh = defh_.data(); // for part horizantal start point defv = defv_.data(); //for vertical start point defp = defp_.data(); //the w*dim0 + h Ih = Ih_.data(); //save the best location for dt Iv = Iv_.data(); // LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2 "<<bottom[0]->num() <<","<<channels_<<","<<width_<<","<<height_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { 
LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; Flag_print = false; }*/ // Dtype* data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) //LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; /* for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data4[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS]; }*/ dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); /*for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << data_pointer[phS * width_ + pwS] ; LOG(INFO) << "fp data3[" << phS << "," << pwS <<"]:" << "=>" << "[" << ihval << "," << ivval << "]:" << Mdt[phS * width_ + pwS]; }*/ // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"<<","<<n<<","<<ch; //LOG(INFO) << defp[ch]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; // dh[n*channels_+ch] = 0; // dv[n*channels_+ch] = 0; // LOG(INFO)<< "defw:" << defw[ch*4+0] <<"," << defw[ch*4+1] <<","<< defw[ch*4+2]<<","<< defw[ch*4+3]; // for (int ph = 0; ph < pooled_height_; ++ph) { int ph = static_cast<int>(ch/pooled_width_); int pw = ch - ph*pooled_width_; int phS = ph * stride_; // for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; // LOG(INFO) << ph <<","<< pw; int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype hdif = phS - Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype vdif = pwS - Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; // LOG(INFO) << "fp data[" << defval << "]:" << data_pointer[defval] <<","<< Mdt[defval]; // LOG(INFO) << "fp data2:" << phS <<"," <<pwS << "=> " << ihval << "," << ivval << "," << data_pointer[ihval * width_ + ivval]; // top_data[ph * pooled_width_ + pw] = Mdt[phS * width_ + pwS]; top_data[0] = Mdt[phS * width_ + pwS]; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = hdif; dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; // } // } top_data += top[0]->offset(0, 1); // top_data[n*channels_+ch] = Mdt[defp[ch]]; /* // facillitate BP using conv LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.3"; memset(data_pointer2, 0, sizeof(Dtype) * N_); LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4"; data_pointer2[defp[ch]] = top_data[n*channels_+ch];*/ //LOG(INFO) << "dh_dv" <<ch <<": "<<defh[ch] << ", "<< Ih[(n*channels_+ch)*N_+defp[ch]]<< ", " << defv[ch] << ", "<<Iv[(n*channels_+ch)*N_+defp[ch]]; // if ((n==0)&&( top_data[n*channels_+ch] > 10000)) // LOG(INFO) << "Forward_gpu dt2!!: vals:" << top_data[n*channels_+ch] <<" from " << data_pointer[defp[ch]]; // obtain the dh and dv of each part //CHECK_EQ(Flag_print, 
1); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.5"; } } //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.6:" << top_data[0]; top[0]->mutable_gpu_data(); // LOG(INFO)<<"the end in DEF_ALL forward"; // bottom[0]->mutable_gpu_data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL2: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); for (int ch = 0; ch < channels_; ++ch) { if (defw[ch*4+0] < blobl_a_min_) defw[ch*4+0] = blobl_a_min_; if (defw[ch*4+2] < blobl_a_min_) defw[ch*4+2] = blobl_a_min_; } N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2"<<bottom[0]->num() <<","<<channels_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; Flag_print = false; }*/ // Dtype* data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) /* LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data4[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS]; }*/ dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); /* for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data3[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS] << "=>" << "[" << ihval << "," << ivval << "]:" << Mdt[phS * width_ + pwS]; } */ // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"; //LOG(INFO) << defp[ch]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; // dh[n*channels_+ch] = 0; // dv[n*channels_+ch] = 0; // LOG(INFO)<< "defw:" << defw[ch*4+0] 
<<"," << defw[ch*4+1] <<","<< defw[ch*4+2]<<","<< defw[ch*4+3]; for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype hdif = phS - Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype vdif = pwS - Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; // LOG(INFO) << "fp data[" << defval << "]:" << data_pointer[defval] <<","<< data_pointer[defval-1] <<"," << data_pointer[defval+1] << ", "<< Mdt[defval]; // LOG(INFO) << "fp data2:" << phS <<"," <<pwS << "=> " << ihval << "," << ivval << "," << data_pointer[ihval * width_ + ivval]; top_data[ph * pooled_width_ + pw] = data_pointer[ihval * width_ + ivval]; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = hdif; dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; } } top_data += top[0]->offset(0, 1); // top_data[n*channels_+ch] = Mdt[defp[ch]]; /* // facillitate BP using conv LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.3"; memset(data_pointer2, 0, sizeof(Dtype) * N_); LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4"; data_pointer2[defp[ch]] = top_data[n*channels_+ch];*/ //LOG(INFO) << "dh_dv" <<ch <<": "<<defh[ch] << ", "<< Ih[(n*channels_+ch)*N_+defp[ch]]<< ", " << defv[ch] << ", "<<Iv[(n*channels_+ch)*N_+defp[ch]]; // if ((n==0)&&( top_data[n*channels_+ch] > 10000)) // LOG(INFO) << "Forward_gpu dt2!!: vals:" << top_data[n*channels_+ch] <<" from " << data_pointer[defp[ch]]; // obtain the dh and dv of each part //CHECK_EQ(Flag_print, 1); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.5"; } } //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.6:" << top_data[0]; top[0]->mutable_gpu_data(); break; case PoolingParameter_PoolMethod_DEF_ALL3: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF_ALL3"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2"<<bottom[0]->num() <<","<<channels_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); /* for (int ch = 0; ch < channels_; ++ch) { for (int ph = 0; ph < pooled_height_; ++ph) { for (int pw = 0; pw < pooled_width_; ++pw) { int idx = ch*Nparam+ph*pooled_height_+pw; if (defw[idx] < 0) defw[idx] = 0; } } }*/ for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; int center = kernel_size_/2; int def_center = center+center*kernel_size_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; Flag_print = false; }*/ // Dtype* 
data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) /* LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data4[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS]; }*/ // dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); /* for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data3[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS] << "=>" << "[" << ihval << "," << ivval << "]:" << Mdt[phS * width_ + pwS]; } */ // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"; //LOG(INFO) << defp[ch]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; // dh[n*channels_+ch] = 0; // dv[n*channels_+ch] = 0; // LOG(INFO)<< "defw:" << defw[ch*4+0] <<"," << defw[ch*4+1] <<","<< defw[ch*4+2]<<","<< defw[ch*4+3]; for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; if (phS > height_-kernel_size_) phS = height_ - kernel_size_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; int defwidx; int maxIdx; int hstart = ph * stride_h_ - pad_h_; int wstart = pw * stride_w_ - pad_w_; int hstart_ori = hstart; int wstart_ori = wstart; int hend = min(hstart + kernel_h_, height_); int wend = min(wstart + kernel_w_, width_); hstart = max(hstart, 0); wstart = max(wstart, 0); const int pool_index = ph * pooled_width_ + pw; Dtype maxVal = -FLT_MAX; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int index = h * width_ + w; int kh = h-hstart_ori; int kv = w-wstart_ori; int defwidx = kh*kernel_size_+kv; Dtype CurVal1 = data_pointer[index]; Dtype CurVal2 = CurVal1+defw[ch*Nparam+defwidx]; if (CurVal2 > maxVal) { maxVal = CurVal2; // mask[pool_index] = index; maxIdx = defwidx; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = index; } } } top_data[pool_index] = maxVal; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = maxIdx; /* int pwS = pw * stride_; int defval = phS * width_ + pwS; int defwidx; int maxIdx; if (pwS > width_-kernel_size_) pwS = width_ - kernel_size_; maxIdx = def_center; Dtype maxVal = -FLT_MAX; defwidx = 0; for (int kh=0; kh<kernel_size_; kh++) for (int kv=0; kv<kernel_size_; kv++) { Dtype CurVal1 = data_pointer[(phS+kh) * width_ + pwS+kv]; Dtype CurVal2 = CurVal1+defw[ch*Nparam+defwidx]; if (fabs(CurVal1-CurVal2) > 0.0001) LOG(INFO) << "CurVal1, 2:" << CurVal1 <<", "<< CurVal2; if (maxVal < CurVal2) { maxVal = CurVal2; maxIdx = defwidx; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = (phS+kh) * width_ + pwS+kv; } defwidx++; } top_data[ph * pooled_width_ + pw] = maxVal; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = 
maxIdx; // dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; */ } } top_data += top[0]->offset(0, 1); } } top[0]->mutable_gpu_data(); break; case PoolingParameter_PoolMethod_DEF_ALL4: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2"<<bottom[0]->num() <<","<<channels_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; if (phS > height_-kernel_size_) phS = height_ - kernel_size_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; int defwidx; int maxIdx; maxIdx = 0; if (pwS > width_-kernel_size_) pwS = width_ - kernel_size_; Dtype maxVal1 = data_pointer[phS * width_ + pwS]; Dtype maxVal = maxVal1+defw[ch*Nparam+0]; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = defval; defwidx = 0; for (int kh=0; kh<kernel_size_; kh++) for (int kv=0; kv<kernel_size_; kv++) { Dtype CurVal1 = data_pointer[(phS+kh) * width_ + pwS+kv]; Dtype CurVal2 = CurVal1+defw[ch*Nparam+defwidx]; if (maxVal < CurVal2) { maxVal = CurVal2; maxVal1 = CurVal1; maxIdx = defwidx; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = (phS+kh) * width_ + pwS+kv; } defwidx++; } top_data[ph * pooled_width_ + pw] = maxVal1; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = maxIdx; // dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; } } top_data += top[0]->offset(0, 1); } } top[0]->mutable_gpu_data(); break; case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. 
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //LOG(INFO)<<"the entry in backward"; if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; const int* mask = NULL; Dtype* d_defw; Dtype* dh; Dtype* dv; int* Iv; int* Ih; int* defp; int N2; int Nparam = kernel_size_*kernel_size_; const Dtype* bottom_data_p; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_DEF: //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF"; /* top_data = top[0]->cpu_data(); bottom_data = (*bottom)[0]->cpu_data();*/ bottom_diff = bottom[0]->mutable_cpu_diff(); top_diff = top[0]->cpu_diff(); // LOG(INFO)<<"top_channels:"<<top[0]->channels(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 1"; Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); // Ih = reinterpret_cast<int*>(Ih_->mutable_cpu_data()); // Iv = reinterpret_cast<int*>(Iv_->mutable_cpu_data()); // defp = reinterpret_cast<int*>(defp_->mutable_cpu_data()); d_defw = this->blobs_[0]->mutable_cpu_diff(); // memset(d_defw, 0, channels_*4* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2"; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.1"; int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.2"; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.3"; Dtype t_dif = top_diff[0]; // Dtype t_dif = top_diff[n*channels_+ch]; /* if ( (n==0) && (ch==0) ) for (int h = hstart; h < hend; ++h) { for (int v = vstart; v < vend; ++v) { bottom_diff[h * width_ + v] = t_dif; } } else*/ /* for (int h = hstart; h < hend; ++h) { for (int v = vstart; v < vend; ++v) { bottom_diff[h * width_ + v] = t_dif; } } */ bottom_diff[hstart * width_ + vstart] += t_dif; /* if (fabs(t_dif) > 1) LOG(INFO) << "bp t_dif>1:" << t_dif << ", "<< ch << "," <<n; if ( (n==0) && (fabs(t_dif) > 0.01)) LOG(INFO) << "Backward_gpu!!:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); */ // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ if (n==0) { d_defw[ch*4+0] =-t_dif*dv[n*channels_+ch]*dv[n*channels_+ch]; d_defw[ch*4+1] =-t_dif*dv[n*channels_+ch]; d_defw[ch*4+2] =-t_dif*dh[n*channels_+ch]*dh[n*channels_+ch]; d_defw[ch*4+3] =-t_dif*dh[n*channels_+ch]; } else { d_defw[ch*4+0] -= t_dif*dv[n*channels_+ch]*dv[n*channels_+ch]; d_defw[ch*4+1] -= t_dif*dv[n*channels_+ch]; d_defw[ch*4+2] -= t_dif*dh[n*channels_+ch]*dh[n*channels_+ch]; d_defw[ch*4+3] -= t_dif*dh[n*channels_+ch]; } //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; 
bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } //LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); // top[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL: // LOG(INFO)<<"the entry in DEF_ALL backward"; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF"; bottom_diff = bottom[0]->mutable_cpu_diff(); top_diff = top[0]->cpu_diff(); Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); d_defw = this->blobs_[0]->mutable_cpu_diff(); memset(d_defw, 0, channels_*4* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); N2 = pooled_width_*pooled_height_; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2"; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.1"; int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.2"; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.3"; //for (int ph = 0; ph < pooled_height_; ++ph) { int ph = static_cast<int>(ch/pooled_width_); int pw = ch - ph*pooled_width_; int phS = ph * stride_; // for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int ihb = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivb = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype t_dif = top_diff[ph * pooled_width_ + pw]; bottom_diff[ihb * width_ + ivb] += t_dif; // LOG(INFO) << "bp: " << ph << "*" << pooled_width_ <<"+" << pw << "=>" << ihb << "*" << width_ << "+" << ivb; Dtype D1, D2, D3, D4; D1 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D2 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D3 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D4 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; if (n==0) { d_defw[ch*4+0] =-t_dif*D1; d_defw[ch*4+1] =-t_dif*D2; d_defw[ch*4+2] =-t_dif*D3; d_defw[ch*4+3] =-t_dif*D4; } else { d_defw[ch*4+0] -= t_dif*D1; d_defw[ch*4+1] -= t_dif*D2; d_defw[ch*4+2] -= t_dif*D3; d_defw[ch*4+3] -= t_dif*D4; } // } // } // Dtype t_dif = top_diff[0]; // bottom_diff[hstart * width_ + vstart] += t_dif; // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; // LOG(INFO) <<"bp offset:" << (*bottom)[0]->offset(0, 1) << "," <<top[0]->offset(0, 1); bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } 
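        // At this point d_defw holds, per channel, the batch-accumulated gradients of the
        // four deformation weights: each top diff t_dif contributes -t_dif * {dv^2, dv, dh^2, dh},
        // consistent with a DPM-style quadratic deformation cost w0*dv^2 + w1*dv + w2*dh^2 + w3*dh
        // applied inside dt() during the forward pass.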
//LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); // top[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); // LOG(INFO)<<"the end in DEF_ALL backward"; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL2: bottom_diff = bottom[0]->mutable_cpu_diff(); top_diff = top[0]->cpu_diff(); Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); d_defw = this->blobs_[0]->mutable_cpu_diff(); memset(d_defw, 0, channels_*4* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); N2 = pooled_width_*pooled_height_; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_); for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int ihb = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivb = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype t_dif = top_diff[ph * pooled_width_ + pw]; bottom_diff[ihb * width_ + ivb] += t_dif; // LOG(INFO) << "bp: " << ph << "*" << pooled_width_ <<"+" << pw << "=>" << ihb << "*" << width_ << "+" << ivb; Dtype D1, D2, D3, D4; D1 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D2 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D3 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D4 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; if (n==0) { d_defw[ch*4+0] =-t_dif*D1; d_defw[ch*4+1] =-t_dif*D2; d_defw[ch*4+2] =-t_dif*D3; d_defw[ch*4+3] =-t_dif*D4; } else { d_defw[ch*4+0] -= t_dif*D1; d_defw[ch*4+1] -= t_dif*D2; d_defw[ch*4+2] -= t_dif*D3; d_defw[ch*4+3] -= t_dif*D4; } } } // Dtype t_dif = top_diff[0]; // bottom_diff[hstart * width_ + vstart] += t_dif; // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; // LOG(INFO) <<"bp offset:" << (*bottom)[0]->offset(0, 1) << "," <<top[0]->offset(0, 1); bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } //LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); // top[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL3: case PoolingParameter_PoolMethod_DEF_ALL4: //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF_ALL3"; bottom_diff = bottom[0]->mutable_cpu_diff(); 
top_diff = top[0]->cpu_diff(); Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); d_defw = this->blobs_[0]->mutable_cpu_diff(); memset(d_defw, 0, channels_*kernel_size_*kernel_size_* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); N2 = pooled_width_*pooled_height_; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { /* int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_);*/ for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int idxInv = Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; Dtype t_dif = top_diff[ph * pooled_width_ + pw]; bottom_diff[idxInv] += t_dif; int defidx = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; // if ( (defidx > Nparam) || (defidx < 0) ) // LOG(INFO) << "bp: " << idxInv << "," << ph << "*" << pooled_width_ <<"+" << pw << "=>" << idxInv; // LOG(INFO) << "bp: " << ph << "*" << pooled_width_ <<"+" << pw << "=>" << ihb << "*" << width_ << "+" << ivb; d_defw[ch*Nparam+defidx] -= t_dif; } } // Dtype t_dif = top_diff[0]; // bottom_diff[hstart * width_ + vstart] += t_dif; // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; // LOG(INFO) <<"bp offset:" << (*bottom)[0]->offset(0, 1) << "," <<top[0]->offset(0, 1); bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } //LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); break; case PoolingParameter_PoolMethod_LOWRES: // this Lowres does not require bp break; case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_,
        stride_h_, stride_w_, bottom_diff);
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);

}  // namespace caffe
886ae2144c25c7bb329921b2c19523c2e09d87b5.cu
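// CUDA implementation of the Caffe pooling layer, covering the deformation
// (DEF, DEF_ALL, DEF_ALL2/3/4) pooling variants alongside the standard
// MAX, AVE, and STOCHASTIC methods.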
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * 
width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // LOG(INFO)<<"the entry in forward"; const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); Dtype* bottom_diff; Dtype* dh; Dtype* dv; Dtype* Mdt; // int N_; int* defh; int* defv; int* defp; int* Ih; int* Iv; Dtype* defw; int Nparam = kernel_size_*kernel_size_; const Dtype* bottom_data_p; // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_DEF: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); dh_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); dv_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); Iv_.resize((channels_*bottom[0]->num()*height_*width_ )); //each point in inputmap Ih_.resize((channels_*bottom[0]->num()*height_*width_ )); top[0]->Reshape(bottom[0]->num(), channels_, 1, 1); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); for (int ch = 0; ch < channels_; ++ch) { if (defw[ch*4+0] < blobl_a_min_) defw[ch*4+0] = blobl_a_min_; if (defw[ch*4+2] < blobl_a_min_) defw[ch*4+2] = blobl_a_min_; // if (defw[ch*4+1] > 0.2) // defw[ch*4+1] = 0.2; // if (defw[ch*4+3] > 0.2) // defw[ch*4+3] = 0.2; /* for (int i=0; i< 4; ++i) if (fabs(defw[ch*4+i]) > 5) LOG(INFO) << "defw > 5" <<ch <<": " << defw[ch*4+i];*/ //if ( (ch==0) ) // LOG(INFO) << "defw" <<ch <<": " << defw[ch*4+0] << ", "<< defw[ch*4+1]<< ", " << defw[ch*4+2]<< ", " << defw[ch*4+3]; } // LOG(INFO) << "ff defw:" << defw[0*4+0] << ", "<< defw[0*4+1]<< ", " << defw[0*4+2]<< ", " << defw[0*4+3]; // bool Flag_print; N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF"<<bottom[0]->num() <<","<< channels_<<bottom[0]<<","<< pooled_height_; for (int n = 0; n < bottom[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; //Flag_print = false; }*/ // Dtype* data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) // LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); //LOG(INFO) << "defw" <<ch <<": " << defw[ch*4+0] << ", "<< defw[ch*4+1]<< ", " << defw[ch*4+2]<< ", " << defw[ch*4+3]; //LOG(INFO) << "ff defw:" << defw[ch*4+0] << ", "<< defw[ch*4+1]<< ", " << defw[ch*4+2]<< ", " << defw[ch*4+3]; // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"; //LOG(INFO) << defp[ch]<<","<<data_pointer[defp[ch]]<<","<<Mdt[defp[ch]]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << 
data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; top_data[n*channels_+ch] = Mdt[defp[ch]]; /* // facillitate BP using conv LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.3"; memset(data_pointer2, 0, sizeof(Dtype) * N_); LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4"; data_pointer2[defp[ch]] = top_data[n*channels_+ch];*/ //LOG(INFO) << "dh_dv" <<ch <<": "<<defh[ch] << ", "<< Ih[(n*channels_+ch)*N_+defp[ch]]<< ", " << defv[ch] << ", "<<Iv[(n*channels_+ch)*N_+defp[ch]]; if ((n==0)&&( top_data[n*channels_+ch] > 10000)) LOG(INFO) << "Forward_gpu dt2!!: vals:" << top_data[n*channels_+ch] <<" from " << data_pointer[defp[ch]]; // obtain the dh and dv of each part dh[n*channels_+ch] = defh[ch] - Ih[(n*channels_+ch)*N_+defp[ch]]; dv[n*channels_+ch] = defv[ch] - Iv[(n*channels_+ch)*N_+defp[ch]]; //CHECK_EQ(Flag_print, 1); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.5"; } } //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.6:" << top_data[0]; top[0]->mutable_gpu_data(); // bottom[0]->mutable_gpu_data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); // LOG(INFO)<<"the entry in DEF_ALL forward"; bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); dh_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); dv_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); Iv_.resize((channels_*bottom[0]->num()*height_*width_ )); //each point in inputmap Ih_.resize((channels_*bottom[0]->num()*height_*width_ )); //tmpIx_.resize((N_ )); // for one map, n = width * height //tmpIy_.resize((N_ )); //defh_.resize((channels_ )); // part location //defv_.resize((channels_ )); //defp_.resize((channels_ )); //Mdt_.resize((N_)); //tmpM_.resize(N_); top[0]->Reshape(bottom[0]->num(), pooled_height_ * pooled_width_, 1, 1); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); memset(dh, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); memset(dv, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); defw = this->blobs_[0]->mutable_cpu_data(); for (int ch = 0; ch < channels_; ++ch) { if (defw[ch*4+0] < blobl_a_min_) defw[ch*4+0] = blobl_a_min_; if (defw[ch*4+2] < blobl_a_min_) defw[ch*4+2] = blobl_a_min_; } N_ = width_ * height_; defh = defh_.data(); // for part horizantal start point defv = defv_.data(); //for vertical start point defp = defp_.data(); //the w*dim0 + h Ih = Ih_.data(); //save the best location for dt Iv = Iv_.data(); // LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2 "<<bottom[0]->num() <<","<<channels_<<","<<width_<<","<<height_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { 
LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; Flag_print = false; }*/ // Dtype* data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) //LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; /* for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data4[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS]; }*/ dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); /*for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << data_pointer[phS * width_ + pwS] ; LOG(INFO) << "fp data3[" << phS << "," << pwS <<"]:" << "=>" << "[" << ihval << "," << ivval << "]:" << Mdt[phS * width_ + pwS]; }*/ // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"<<","<<n<<","<<ch; //LOG(INFO) << defp[ch]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; // dh[n*channels_+ch] = 0; // dv[n*channels_+ch] = 0; // LOG(INFO)<< "defw:" << defw[ch*4+0] <<"," << defw[ch*4+1] <<","<< defw[ch*4+2]<<","<< defw[ch*4+3]; // for (int ph = 0; ph < pooled_height_; ++ph) { int ph = static_cast<int>(ch/pooled_width_); int pw = ch - ph*pooled_width_; int phS = ph * stride_; // for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; // LOG(INFO) << ph <<","<< pw; int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype hdif = phS - Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype vdif = pwS - Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; // LOG(INFO) << "fp data[" << defval << "]:" << data_pointer[defval] <<","<< Mdt[defval]; // LOG(INFO) << "fp data2:" << phS <<"," <<pwS << "=> " << ihval << "," << ivval << "," << data_pointer[ihval * width_ + ivval]; // top_data[ph * pooled_width_ + pw] = Mdt[phS * width_ + pwS]; top_data[0] = Mdt[phS * width_ + pwS]; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = hdif; dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; // } // } top_data += top[0]->offset(0, 1); // top_data[n*channels_+ch] = Mdt[defp[ch]]; /* // facillitate BP using conv LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.3"; memset(data_pointer2, 0, sizeof(Dtype) * N_); LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4"; data_pointer2[defp[ch]] = top_data[n*channels_+ch];*/ //LOG(INFO) << "dh_dv" <<ch <<": "<<defh[ch] << ", "<< Ih[(n*channels_+ch)*N_+defp[ch]]<< ", " << defv[ch] << ", "<<Iv[(n*channels_+ch)*N_+defp[ch]]; // if ((n==0)&&( top_data[n*channels_+ch] > 10000)) // LOG(INFO) << "Forward_gpu dt2!!: vals:" << top_data[n*channels_+ch] <<" from " << data_pointer[defp[ch]]; // obtain the dh and dv of each part //CHECK_EQ(Flag_print, 
1); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.5"; } } //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.6:" << top_data[0]; top[0]->mutable_gpu_data(); // LOG(INFO)<<"the end in DEF_ALL forward"; // bottom[0]->mutable_gpu_data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL2: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); for (int ch = 0; ch < channels_; ++ch) { if (defw[ch*4+0] < blobl_a_min_) defw[ch*4+0] = blobl_a_min_; if (defw[ch*4+2] < blobl_a_min_) defw[ch*4+2] = blobl_a_min_; } N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2"<<bottom[0]->num() <<","<<channels_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; Flag_print = false; }*/ // Dtype* data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) /* LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data4[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS]; }*/ dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); /* for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data3[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS] << "=>" << "[" << ihval << "," << ivval << "]:" << Mdt[phS * width_ + pwS]; } */ // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"; //LOG(INFO) << defp[ch]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; // dh[n*channels_+ch] = 0; // dv[n*channels_+ch] = 0; // LOG(INFO)<< "defw:" << defw[ch*4+0] 
<<"," << defw[ch*4+1] <<","<< defw[ch*4+2]<<","<< defw[ch*4+3]; for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype hdif = phS - Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype vdif = pwS - Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; // LOG(INFO) << "fp data[" << defval << "]:" << data_pointer[defval] <<","<< data_pointer[defval-1] <<"," << data_pointer[defval+1] << ", "<< Mdt[defval]; // LOG(INFO) << "fp data2:" << phS <<"," <<pwS << "=> " << ihval << "," << ivval << "," << data_pointer[ihval * width_ + ivval]; top_data[ph * pooled_width_ + pw] = data_pointer[ihval * width_ + ivval]; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = hdif; dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; } } top_data += top[0]->offset(0, 1); // top_data[n*channels_+ch] = Mdt[defp[ch]]; /* // facillitate BP using conv LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.3"; memset(data_pointer2, 0, sizeof(Dtype) * N_); LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4"; data_pointer2[defp[ch]] = top_data[n*channels_+ch];*/ //LOG(INFO) << "dh_dv" <<ch <<": "<<defh[ch] << ", "<< Ih[(n*channels_+ch)*N_+defp[ch]]<< ", " << defv[ch] << ", "<<Iv[(n*channels_+ch)*N_+defp[ch]]; // if ((n==0)&&( top_data[n*channels_+ch] > 10000)) // LOG(INFO) << "Forward_gpu dt2!!: vals:" << top_data[n*channels_+ch] <<" from " << data_pointer[defp[ch]]; // obtain the dh and dv of each part //CHECK_EQ(Flag_print, 1); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.5"; } } //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.6:" << top_data[0]; top[0]->mutable_gpu_data(); break; case PoolingParameter_PoolMethod_DEF_ALL3: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF_ALL3"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2"<<bottom[0]->num() <<","<<channels_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); /* for (int ch = 0; ch < channels_; ++ch) { for (int ph = 0; ph < pooled_height_; ++ph) { for (int pw = 0; pw < pooled_width_; ++pw) { int idx = ch*Nparam+ph*pooled_height_+pw; if (defw[idx] < 0) defw[idx] = 0; } } }*/ for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; int center = kernel_size_/2; int def_center = center+center*kernel_size_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); /* Flag_print = 1; for (int h =0; h < height_; h++) for (int v =0; (v < width_)&&Flag_print; v++) if (data_pointer[h*width_+v] > 100) { LOG(INFO) << "data_pointer["<< h <<"][" <<v <<"] > 100," <<data_pointer[h*width_+v] <<"," <<n <<"," << ch; Flag_print = false; }*/ // Dtype* 
data_pointer2 = top_buffer_data + top_buffer_.offset(n, ch); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.1 "<<"ch="<< ch ; //<<"ch="<< ch <<" offset:" << top_buffer_.offset(0, ch) << " " << data_pointer[0]; //if ( (n==0) && data_pointer[0] > 1) /* LOG(INFO) << "Forward_gpu dt1:" <<"ch="<< ch <<" offset:" << top_buffer_.offset(n, ch) << "vals: " << data_pointer[0] <<", " << data_pointer[4]<<"," << height_ << "," << width_; for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data4[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS]; }*/ // dt(width_, height_, data_pointer, defw[ch*4+0], defw[ch*4+1], defw[ch*4+2], defw[ch*4+3], n, ch); /* for (int phS=0; phS<height_; ++phS) for (int pwS = 0; pwS < width_; ++pwS) { int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; LOG(INFO) << "fp data3[" << phS << "," << pwS <<"]:" << data_pointer[phS * width_ + pwS] << "=>" << "[" << ihval << "," << ivval << "]:" << Mdt[phS * width_ + pwS]; } */ // obtain the score of each part in each channel //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.2"; //LOG(INFO) << defp[ch]; //LOG(INFO) << n*channels_+ch; //if (Mdt[defp[ch]] > 100) // LOG(INFO) << "fp data:" << defp[ch] << "," << data_pointer[defp[ch]] <<","<< data_pointer[defp[ch]-1] <<"," << data_pointer[defp[ch]+1] << ", "<< Mdt[defp[ch]]; // dh[n*channels_+ch] = 0; // dv[n*channels_+ch] = 0; // LOG(INFO)<< "defw:" << defw[ch*4+0] <<"," << defw[ch*4+1] <<","<< defw[ch*4+2]<<","<< defw[ch*4+3]; for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; if (phS > height_-kernel_size_) phS = height_ - kernel_size_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; int defwidx; int maxIdx; int hstart = ph * stride_h_ - pad_h_; int wstart = pw * stride_w_ - pad_w_; int hstart_ori = hstart; int wstart_ori = wstart; int hend = min(hstart + kernel_h_, height_); int wend = min(wstart + kernel_w_, width_); hstart = max(hstart, 0); wstart = max(wstart, 0); const int pool_index = ph * pooled_width_ + pw; Dtype maxVal = -FLT_MAX; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int index = h * width_ + w; int kh = h-hstart_ori; int kv = w-wstart_ori; int defwidx = kh*kernel_size_+kv; Dtype CurVal1 = data_pointer[index]; Dtype CurVal2 = CurVal1+defw[ch*Nparam+defwidx]; if (CurVal2 > maxVal) { maxVal = CurVal2; // mask[pool_index] = index; maxIdx = defwidx; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = index; } } } top_data[pool_index] = maxVal; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = maxIdx; /* int pwS = pw * stride_; int defval = phS * width_ + pwS; int defwidx; int maxIdx; if (pwS > width_-kernel_size_) pwS = width_ - kernel_size_; maxIdx = def_center; Dtype maxVal = -FLT_MAX; defwidx = 0; for (int kh=0; kh<kernel_size_; kh++) for (int kv=0; kv<kernel_size_; kv++) { Dtype CurVal1 = data_pointer[(phS+kh) * width_ + pwS+kv]; Dtype CurVal2 = CurVal1+defw[ch*Nparam+defwidx]; if (fabs(CurVal1-CurVal2) > 0.0001) LOG(INFO) << "CurVal1, 2:" << CurVal1 <<", "<< CurVal2; if (maxVal < CurVal2) { maxVal = CurVal2; maxIdx = defwidx; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = (phS+kh) * width_ + pwS+kv; } defwidx++; } top_data[ph * pooled_width_ + pw] = maxVal; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = 
maxIdx; // dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; */ } } top_data += top[0]->offset(0, 1); } } top[0]->mutable_gpu_data(); break; case PoolingParameter_PoolMethod_DEF_ALL4: //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 1"; // top_buffer_data = top_buffer_.mutable_cpu_data(); bottom_diff = (bottom)[0]->mutable_cpu_diff(); memset(bottom_diff, 0, top[0]->num()*channels_*width_*height_*sizeof(Dtype)); bottom_diff=(bottom)[0]->mutable_gpu_diff(); bottom_data_p = bottom[0]->cpu_data(); top_data = top[0]->mutable_cpu_data(); Mdt = Mdt_.data(); // Mdt = reinterpret_cast<Dtype*>(Mdt_->mutable_cpu_data()); // caffe_copy(channels_*height_*width_, bottom_data, top_buffer_data); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); defw = this->blobs_[0]->mutable_cpu_data(); N_ = width_ * height_; defh = defh_.data(); defv = defv_.data(); defp = defp_.data(); Ih = Ih_.data(); Iv = Iv_.data(); //LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2"<<bottom[0]->num() <<","<<channels_; // LOG(INFO) << "fp offset:" << bottom[0]->offset(0, 1); for (int n = 0; n < bottom[0]->num(); ++n) { int N2 = pooled_height_*pooled_width_; for (int ch = 0; ch < channels_; ++ch) { const Dtype* data_pointer = bottom_data_p + bottom[0]->offset(n, ch); for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; if (phS > height_-kernel_size_) phS = height_ - kernel_size_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int defval = phS * width_ + pwS; int defwidx; int maxIdx; maxIdx = 0; if (pwS > width_-kernel_size_) pwS = width_ - kernel_size_; Dtype maxVal1 = data_pointer[phS * width_ + pwS]; Dtype maxVal = maxVal1+defw[ch*Nparam+0]; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = defval; defwidx = 0; for (int kh=0; kh<kernel_size_; kh++) for (int kv=0; kv<kernel_size_; kv++) { Dtype CurVal1 = data_pointer[(phS+kh) * width_ + pwS+kv]; Dtype CurVal2 = CurVal1+defw[ch*Nparam+defwidx]; if (maxVal < CurVal2) { maxVal = CurVal2; maxVal1 = CurVal1; maxIdx = defwidx; Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = (phS+kh) * width_ + pwS+kv; } defwidx++; } top_data[ph * pooled_width_ + pw] = maxVal1; dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = maxIdx; // dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw] = vdif; } } top_data += top[0]->offset(0, 1); } } top[0]->mutable_gpu_data(); break; case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. 
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //LOG(INFO)<<"the entry in backward"; if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; const int* mask = NULL; Dtype* d_defw; Dtype* dh; Dtype* dv; int* Iv; int* Ih; int* defp; int N2; int Nparam = kernel_size_*kernel_size_; const Dtype* bottom_data_p; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_DEF: //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF"; /* top_data = top[0]->cpu_data(); bottom_data = (*bottom)[0]->cpu_data();*/ bottom_diff = bottom[0]->mutable_cpu_diff(); top_diff = top[0]->cpu_diff(); // LOG(INFO)<<"top_channels:"<<top[0]->channels(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 1"; Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); // Ih = reinterpret_cast<int*>(Ih_->mutable_cpu_data()); // Iv = reinterpret_cast<int*>(Iv_->mutable_cpu_data()); // defp = reinterpret_cast<int*>(defp_->mutable_cpu_data()); d_defw = this->blobs_[0]->mutable_cpu_diff(); // memset(d_defw, 0, channels_*4* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2"; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.1"; int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.2"; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.3"; Dtype t_dif = top_diff[0]; // Dtype t_dif = top_diff[n*channels_+ch]; /* if ( (n==0) && (ch==0) ) for (int h = hstart; h < hend; ++h) { for (int v = vstart; v < vend; ++v) { bottom_diff[h * width_ + v] = t_dif; } } else*/ /* for (int h = hstart; h < hend; ++h) { for (int v = vstart; v < vend; ++v) { bottom_diff[h * width_ + v] = t_dif; } } */ bottom_diff[hstart * width_ + vstart] += t_dif; /* if (fabs(t_dif) > 1) LOG(INFO) << "bp t_dif>1:" << t_dif << ", "<< ch << "," <<n; if ( (n==0) && (fabs(t_dif) > 0.01)) LOG(INFO) << "Backward_gpu!!:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); */ // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ if (n==0) { d_defw[ch*4+0] =-t_dif*dv[n*channels_+ch]*dv[n*channels_+ch]; d_defw[ch*4+1] =-t_dif*dv[n*channels_+ch]; d_defw[ch*4+2] =-t_dif*dh[n*channels_+ch]*dh[n*channels_+ch]; d_defw[ch*4+3] =-t_dif*dh[n*channels_+ch]; } else { d_defw[ch*4+0] -= t_dif*dv[n*channels_+ch]*dv[n*channels_+ch]; d_defw[ch*4+1] -= t_dif*dv[n*channels_+ch]; d_defw[ch*4+2] -= t_dif*dh[n*channels_+ch]*dh[n*channels_+ch]; d_defw[ch*4+3] -= t_dif*dh[n*channels_+ch]; } //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; 
bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } //LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); // top[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL: // LOG(INFO)<<"the entry in DEF_ALL backward"; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF"; bottom_diff = bottom[0]->mutable_cpu_diff(); top_diff = top[0]->cpu_diff(); Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); d_defw = this->blobs_[0]->mutable_cpu_diff(); memset(d_defw, 0, channels_*4* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); N2 = pooled_width_*pooled_height_; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2"; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.1"; int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.2"; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.3"; //for (int ph = 0; ph < pooled_height_; ++ph) { int ph = static_cast<int>(ch/pooled_width_); int pw = ch - ph*pooled_width_; int phS = ph * stride_; // for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int ihb = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivb = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype t_dif = top_diff[ph * pooled_width_ + pw]; bottom_diff[ihb * width_ + ivb] += t_dif; // LOG(INFO) << "bp: " << ph << "*" << pooled_width_ <<"+" << pw << "=>" << ihb << "*" << width_ << "+" << ivb; Dtype D1, D2, D3, D4; D1 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D2 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D3 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D4 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; if (n==0) { d_defw[ch*4+0] =-t_dif*D1; d_defw[ch*4+1] =-t_dif*D2; d_defw[ch*4+2] =-t_dif*D3; d_defw[ch*4+3] =-t_dif*D4; } else { d_defw[ch*4+0] -= t_dif*D1; d_defw[ch*4+1] -= t_dif*D2; d_defw[ch*4+2] -= t_dif*D3; d_defw[ch*4+3] -= t_dif*D4; } // } // } // Dtype t_dif = top_diff[0]; // bottom_diff[hstart * width_ + vstart] += t_dif; // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; // LOG(INFO) <<"bp offset:" << (*bottom)[0]->offset(0, 1) << "," <<top[0]->offset(0, 1); bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } 
//LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); // top[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); // LOG(INFO)<<"the end in DEF_ALL backward"; //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL2: bottom_diff = bottom[0]->mutable_cpu_diff(); top_diff = top[0]->cpu_diff(); Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); d_defw = this->blobs_[0]->mutable_cpu_diff(); memset(d_defw, 0, channels_*4* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); N2 = pooled_width_*pooled_height_; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_); for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int ihb = Ih[(n*channels_+ch)*N_+phS * width_ + pwS]; int ivb = Iv[(n*channels_+ch)*N_+phS * width_ + pwS]; Dtype t_dif = top_diff[ph * pooled_width_ + pw]; bottom_diff[ihb * width_ + ivb] += t_dif; // LOG(INFO) << "bp: " << ph << "*" << pooled_width_ <<"+" << pw << "=>" << ihb << "*" << width_ << "+" << ivb; Dtype D1, D2, D3, D4; D1 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D2 = dv[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D3 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]*dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; D4 = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; if (n==0) { d_defw[ch*4+0] =-t_dif*D1; d_defw[ch*4+1] =-t_dif*D2; d_defw[ch*4+2] =-t_dif*D3; d_defw[ch*4+3] =-t_dif*D4; } else { d_defw[ch*4+0] -= t_dif*D1; d_defw[ch*4+1] -= t_dif*D2; d_defw[ch*4+2] -= t_dif*D3; d_defw[ch*4+3] -= t_dif*D4; } } } // Dtype t_dif = top_diff[0]; // bottom_diff[hstart * width_ + vstart] += t_dif; // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; // LOG(INFO) <<"bp offset:" << (*bottom)[0]->offset(0, 1) << "," <<top[0]->offset(0, 1); bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } //LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); // top[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 3"; break; case PoolingParameter_PoolMethod_DEF_ALL3: case PoolingParameter_PoolMethod_DEF_ALL4: //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF_ALL3"; bottom_diff = bottom[0]->mutable_cpu_diff(); 
top_diff = top[0]->cpu_diff(); Ih = Ih_.data(); Iv = Iv_.data(); defp = defp_.data(); d_defw = this->blobs_[0]->mutable_cpu_diff(); memset(d_defw, 0, channels_*kernel_size_*kernel_size_* sizeof(Dtype)); dh = dh_.mutable_cpu_data(); dv = dv_.mutable_cpu_data(); N2 = pooled_width_*pooled_height_; for (int n = 0; n < top[0]->num(); ++n) { for (int ch = 0; ch < channels_; ++ch) { /* int vstart = Iv[(n*channels_+ch)*N_+defp[ch]]; int hstart = Ih[(n*channels_+ch)*N_+defp[ch]]; int vend = min(vstart + kernel_size_, width_); int hend = min(hstart + kernel_size_, height_);*/ for (int ph = 0; ph < pooled_height_; ++ph) { int phS = ph * stride_; for (int pw = 0; pw < pooled_width_; ++pw) { int pwS = pw * stride_; int idxInv = Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; Dtype t_dif = top_diff[ph * pooled_width_ + pw]; bottom_diff[idxInv] += t_dif; int defidx = dh[(n*channels_+ch)*N2+ph * pooled_width_ + pw]; // if ( (defidx > Nparam) || (defidx < 0) ) // LOG(INFO) << "bp: " << idxInv << "," << ph << "*" << pooled_width_ <<"+" << pw << "=>" << idxInv; // LOG(INFO) << "bp: " << ph << "*" << pooled_width_ <<"+" << pw << "=>" << ihb << "*" << width_ << "+" << ivb; d_defw[ch*Nparam+defidx] -= t_dif; } } // Dtype t_dif = top_diff[0]; // bottom_diff[hstart * width_ + vstart] += t_dif; // LOG(INFO) << "Backward_gpu:" << "," << kernel_size_ <<"," <<width_ <<"," << bottom_diff[hstart * width_ + vstart] << ","<< hstart<<"," <<hend <<","<< vstart <<","<<vend<<","<<(*bottom)[0]->offset(0, 1); // dv for 0 ann 1, dh for 2 and 3 // LOG(INFO) << "bp t_dif:" << t_dif << ", "<< ch << "," << dv[n*channels_+ch]<< ", " << dh[n*channels_+ch]<< "," << top[0]+>offset(0, 1); //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.4"; /* d_defw[ch*4+0] = 0; d_defw[ch*4+1] = 0; d_defw[ch*4+2] = 0; d_defw[ch*4+3] = 0;*/ //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF 2.5"; //if ( (n==top[0]->num()-1) && (ch==0) ) // LOG(INFO) <<bottom_diff[0] << ", "<< "bp d_defw[" <<ch <<"]:" << d_defw[ch*4+0] << ", "<< d_defw[ch*4+1]<< ", " << d_defw[ch*4+2]<< ", " << d_defw[ch*4+3]; // LOG(INFO) <<"bp offset:" << (*bottom)[0]->offset(0, 1) << "," <<top[0]->offset(0, 1); bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } //LOG(INFO) << "bp d_defw:" << d_defw[0*4+0] << ", "<< d_defw[0*4+1]<< ", " << d_defw[0*4+2]<< ", " << d_defw[0*4+3]; bottom_diff=bottom[0]->mutable_gpu_diff(); this->blobs_[0]->mutable_gpu_diff(); break; case PoolingParameter_PoolMethod_LOWRES: // this Lowres does not require bp break; case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, 
pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
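The DEF_ALL3 and DEF_ALL4 branches above score every pooling window as the maximum of (activation + per-channel deformation weight) over the kernel, recording both the winning input index (Ih) and the winning kernel offset (written into dh) so that Backward_gpu can add t_dif at that input position and subtract it from the matching deformation weight. A minimal host-side sketch of that forward scoring rule for a single channel; the function name, float element type, and output arrays are illustrative, not part of the layer:

#include <algorithm>
#include <cfloat>

// CPU reference of the DEF_ALL3 window scoring (one channel).
// defw holds the kernel*kernel deformation weights of this channel.
void def_pool_forward_ref(const float* input, int height, int width,
                          const float* defw, int kernel, int stride, int pad,
                          int pooled_h, int pooled_w,
                          float* top, int* win_index, int* win_offset) {
  for (int ph = 0; ph < pooled_h; ++ph) {
    for (int pw = 0; pw < pooled_w; ++pw) {
      const int hstart = ph * stride - pad, wstart = pw * stride - pad;
      const int hend = std::min(hstart + kernel, height);
      const int wend = std::min(wstart + kernel, width);
      float best = -FLT_MAX;
      int best_idx = 0, best_off = 0;
      for (int h = std::max(hstart, 0); h < hend; ++h) {
        for (int w = std::max(wstart, 0); w < wend; ++w) {
          const int off = (h - hstart) * kernel + (w - wstart);  // defwidx above
          const float v = input[h * width + w] + defw[off];
          if (v > best) { best = v; best_idx = h * width + w; best_off = off; }
        }
      }
      top[ph * pooled_w + pw] = best;             // DEF_ALL3 stores maxVal
      win_index[ph * pooled_w + pw] = best_idx;   // Ih: winning input position
      win_offset[ph * pooled_w + pw] = best_off;  // dh: winning kernel offset
    }
  }
}

DEF_ALL4 differs only in writing the winner's raw activation (maxVal1) to the top blob instead of the deformation-augmented score.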
79afba0c8527f1b5668791e7295b89ee17c28027.hip
// !!! This is a file automatically generated by hipify!!! #include "cuTrasformation.cuh" #include <math.h> #include <stdio.h> #include "cuMatrix.h" #include "util.h" #include "hip/hip_runtime.h" #include <hiprand/hiprand_kernel.h> #include <time.h> #include "Config.h" #include <helper_functions.h> #include <helper_cuda.h> #define GAUSSIAN_FIELD_SIZE (21) /* strictly odd number */ hiprandGenerator_t rand_generator_device; const hiprandRngType_t generator_type = HIPRAND_RNG_PSEUDO_DEFAULT; cuMatrix<double>* cuGaussianKernel; cuMatrix<double>* cuDispH; cuMatrix<double>* cuDispV; float * cu_d_randonNumf; double* cu_d_randomNum; double* cu_h_randomNum; double dElasticSigma = 4.0; /* higher numbers are more smooth and less distorted; Simard uses 4.0*/ int getRandomNumLen(int batch, int ImgSize) { return batch * ImgSize * ImgSize * 2; } /* * blocks : dim3(1) * threads: dim3(GAUSSIAN_FIELD_SIZE*GAUSSIAN_FIELD_SIZE) */ __global__ void g_createGaussianKernel(double* gaussian, double dElasticSigma, int ImgSize) { int iiMid = GAUSSIAN_FIELD_SIZE >> 1; double doubleElasticSigma = dElasticSigma * dElasticSigma; int row = threadIdx.x % ImgSize; int col = threadIdx.x / ImgSize; double val1 = 1.0 / (dElasticSigma * 2.0 * 3.1415926535897932384626433832795); double val2 = (row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid); gaussian[threadIdx.x] = val1 * exp(-1.0 * val2 / (2.0 * doubleElasticSigma)); } void cuInitDistortionMemery(int batch, int ImgSize) { hiprandStatus_t curandstatus; cuGaussianKernel = new cuMatrix<double>(GAUSSIAN_FIELD_SIZE, GAUSSIAN_FIELD_SIZE, 1); if(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE > MAX_THREADS) { printf("g_createGaussianKernel > MAX_THREADS\n"); exit(0); } hipLaunchKernelGGL(( g_createGaussianKernel), dim3(dim3(1)),dim3(dim3(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE)), 0, 0, cuGaussianKernel->devData, dElasticSigma, ImgSize); hipDeviceSynchronize(); /*cu_d_randomNum*/ checkCudaErrors(hipMalloc((void**)&cu_d_randomNum, sizeof(double) * getRandomNumLen(batch, ImgSize))); /*cu_d_randonNumf*/ checkCudaErrors(hipMalloc((void**)&cu_d_randonNumf, sizeof(float) * getRandomNumLen(batch, ImgSize))); /*cu_h_randomNum*/ cu_h_randomNum = (double*)malloc(sizeof(double) * getRandomNumLen(batch, ImgSize)); if(!cu_h_randomNum) { printf("malloc cu_h_randomNum fail\n"); exit(0); } /*hiprandCreateGenerator*/ curandstatus = hiprandCreateGenerator(&rand_generator_device, generator_type); if(curandstatus != HIPRAND_STATUS_SUCCESS) { printf("hiprandCreateGenerator fail\n"); exit(0); } cuDispV = new cuMatrix<double>(batch, ImgSize * ImgSize, 1); cuDispH = new cuMatrix<double>(batch, ImgSize * ImgSize, 1); } __global__ void g_getRandomUniform(float* r1, double* r2, int len) { for(int i = 0; i < len; i += gridDim.x * blockDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { r2[id] = r1[id] * 2.0f - 1.0f; } } } /* * blocks : dim3(batch) * threads : dim3(512) */ __global__ void g_generateDistortionMap( double* _dispH, double* _dispV, double* rand, double* gaussianKernel, double dElasticScaling, double dMaxScaling, double dMaxRotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; double* uniformH = rand + blockIdx.x * ImgSize2; double* uniformV = rand + blockIdx.x * ImgSize2 * 2; double* dispH = _dispH + ImgSize2 * blockIdx.x; double* dispV = _dispV + ImgSize2 * blockIdx.x; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; int iiMid = GAUSSIAN_FIELD_SIZE / 2; double fConvolvedH = 
0.0; double fConvolvedV = 0.0; double fSampleH, fSampleV; double elasticScale = dElasticScaling; for(int xxx = 0; xxx < GAUSSIAN_FIELD_SIZE; ++xxx) { for(int yyy = 0; yyy < GAUSSIAN_FIELD_SIZE; ++yyy) { int xxxDisp = col - iiMid + xxx; int yyyDisp = row - iiMid + yyy; if(xxxDisp < 0 || xxxDisp >= ImgSize || yyyDisp < 0 || yyyDisp >= ImgSize) { fSampleH = 0.0; fSampleV = 0.0; } else { fSampleH = uniformH[yyyDisp * ImgSize + xxxDisp]; fSampleV = uniformV[yyyDisp * ImgSize + xxxDisp]; } fConvolvedH += fSampleH * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx]; fConvolvedV += fSampleV * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx]; } } dispH[idx] = elasticScale * fConvolvedH; dispV[idx] = elasticScale * fConvolvedV; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double dSFHoriz = dMaxScaling / 100.0 * rand[blockIdx.x]; double dSFVert = dMaxScaling / 100.0 * rand[blockIdx.x + 1]; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double angle = dMaxRotation * rand[blockIdx.x]; angle = angle * 3.1415926535897932384626433832795 / 180.0; double cosAngle = cos(angle); double sinAngle = sin(angle); int iMid = ImgSize / 2; double xx = row - iMid; double yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } /*dim3(batch),dim3(ImgSize,ImgSize)*/ __global__ void g_scaleAndRotate( double* _dispH, double* _dispV, double scaling, double rotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; double* dispH = _dispH + ImgSize2 * blockIdx.x; double* dispV = _dispV + ImgSize2 * blockIdx.x; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { dispH[idx] = 0.0; dispV[idx] = 0.0; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double dSFHoriz = scaling / 100.0; double dSFVert = scaling / 100.0; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double angle = rotation; angle = angle * 3.1415926535897932384626433832795 / 180.0; double cosAngle = cos(angle); double sinAngle = sin(angle); int iMid = ImgSize / 2; double xx = row - iMid; double yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } /* * blocks : dim3(batch, Config::instance()->getChannels()) * threads: dim3(min(512, ImgSize * ImgSize)) */ __global__ void g_applyDistortionMap( double** _inputs, double** _outputs, double* _dispH, double* _dispV, int ImgSize) { int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize; double* input = _inputs[blockIdx.x] + ImgSize2 * c; double* output= _outputs[blockIdx.x]+ ImgSize2 * c; double* dispV = _dispV + blockIdx.x * ImgSize2; double* dispH = _dispH + blockIdx.x * ImgSize2; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double sourceRow, sourceCol; double 
fracRow, fracCol; double w1, w2, w3, w4; double sourceValue; int sRow, sCol, sRowp1, sColp1; bool bSkipOutOfBounds; if(fabs(dispV[idx]) < 0.000000001 && fabs(dispH[idx]) < 0.0000000001) { output[idx] = input[idx]; continue; } sourceRow = (double)row - dispV[idx]; sourceCol = (double)col - dispH[idx]; fracRow = sourceRow - (int)sourceRow; fracCol = sourceCol - (int)sourceCol; w1 = ( 1.0 - fracRow ) * ( 1.0 - fracCol ); w2 = ( 1.0 - fracRow ) * fracCol; w3 = fracRow * ( 1.0 - fracCol ); w4 = fracRow * fracCol; bSkipOutOfBounds = false; if ( ((int)sourceRow + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceRow < 0 ) bSkipOutOfBounds = true; if ( ((int)sourceCol + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceCol < 0 ) bSkipOutOfBounds = true; if ( bSkipOutOfBounds == false ) { sRow = (int)sourceRow; sCol = (int)sourceCol; sRowp1 = sRow + 1; sColp1 = sCol + 1; while (sRowp1 >= ImgSize) sRowp1 -= ImgSize; while (sRowp1 < 0) sRowp1 += ImgSize; while (sColp1 >= ImgSize) sColp1 -= ImgSize; while (sColp1 < 0) sColp1 += ImgSize; while (sRow >= ImgSize) sRow -= ImgSize; while (sRow < 0) sRow += ImgSize; while (sCol >= ImgSize) sCol -= ImgSize; while (sCol < 0) sCol += ImgSize; sourceValue = w1 * input[sRow * ImgSize + sCol] + w2 * input[sRow * ImgSize + sColp1] + w3 * input[sRowp1 * ImgSize + sCol] + w4 * input[sRowp1 * ImgSize + sColp1]; } else { sourceValue = -1.0; } output[idx] = sourceValue; } } } void cuApplyRandom(int batch, unsigned long long s, int ImgSize) { hiprandStatus_t hiprandstatus; unsigned long long seed = s; hiprandstatus = hiprandSetPseudoRandomGeneratorSeed(rand_generator_device, seed); if(hiprandstatus != HIPRAND_STATUS_SUCCESS) { printf("hiprandSetPseudoRandomGeneratorSeed fail\n"); exit(0); } hiprandGenerateUniform(rand_generator_device, cu_d_randonNumf, getRandomNumLen(batch, ImgSize)); hipLaunchKernelGGL(( g_getRandomUniform), dim3(dim3(256)),dim3(dim3(256)), 0, 0, cu_d_randonNumf, cu_d_randomNum, getRandomNumLen(batch, ImgSize)); hipDeviceSynchronize(); getLastCudaError("g_getRandomUniform"); int threads = min(512, ImgSize * ImgSize); hipLaunchKernelGGL(( g_generateDistortionMap), dim3(dim3(batch)),dim3(threads), 0, 0, cuDispH->devData, cuDispV->devData, cu_d_randomNum, cuGaussianKernel->devData, Config::instance()->getDistortion(), Config::instance()->getScale(), Config::instance()->getRotation(), ImgSize); hipDeviceSynchronize(); getLastCudaError("g_generateDistortionMap"); } void cuApplyScaleAndRotate(int batch, int ImgSize, double scaling, double rotation) { hipLaunchKernelGGL(( g_scaleAndRotate), dim3(dim3(batch)),dim3(dim3(512)), 0, 0, cuDispH->devData, cuDispV->devData, scaling, rotation, ImgSize); hipDeviceSynchronize(); getLastCudaError("g_scaleAndRotate"); } void cuApplyDistortion(double**inputs, double**outputs, int batch, int ImgSize) { int threadidx = min(ImgSize * ImgSize, 512); hipLaunchKernelGGL(( g_applyDistortionMap), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threadidx)), 0, 0, inputs, outputs, cuDispH->devData, cuDispV->devData, ImgSize); hipDeviceSynchronize(); getLastCudaError("g_applyDistortionMap"); } /* * blocks : dim3(batch, channels) * threads : dim3(min(ImgSize*ImgSize, 512)) */ __global__ void g_applyCropRandom(double**_inputs, double**_outputs, double* random, int crop, int ImgSize) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + crop; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; double* input = _inputs 
[blockIdx.x] + c * inputImgSize2; double* output= _outputs[blockIdx.x]+ c * outputImgSize2; int sx =(int)((random[blockIdx.x] + 1.0) * 0.5 * crop); int sy =(int)((random[blockIdx.x + 1] + 1.0) * 0.5 * crop); if(sx > crop) sx = crop; if(sy > crop) sy = crop; if(sx < 0) sx = 0; if(sy < 0) sy = 0; for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } /* * blocks : dim3(batch, channels) * threads: dim3(min(ImgSize * ImgSize, 512)) */ __global__ void g_applyCrop(double**_inputs, double**_outputs, double* random, int croplen, int ImgSize, int cropr, int cropc) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + croplen; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; double* input = _inputs [blockIdx.x]+ c * inputImgSize2 ; double* output= _outputs[blockIdx.x]+ c * outputImgSize2; int sx = cropr; int sy = cropc; for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } void cuApplyCropRandom(double**inputs, double**outputs, int batch, int ImgSize) { int threads = min(512, ImgSize * ImgSize); hipLaunchKernelGGL(( g_applyCropRandom), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threads)), 0, 0, inputs, outputs, cu_d_randomNum, Config::instance()->getCrop(), ImgSize); hipDeviceSynchronize(); getLastCudaError("g_applyCropRandom"); } void cuApplyCrop(double**inputs, double**outputs, int batch, int ImgSize, int cropr, int cropc) { int threads = min(512, ImgSize * ImgSize); hipLaunchKernelGGL(( g_applyCrop), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threads)), 0, 0, inputs, outputs,cu_d_randomNum, Config::instance()->getCrop(), ImgSize, cropr, cropc); hipDeviceSynchronize(); getLastCudaError("g_applyCrop"); } __device__ void swap(double& val1, double& val2){ double tmp = val1; val1 = val2; val2 = tmp; } /* * function: Horizontal reflection * blocks : dim3(batch, Config::instance()->getChannels()), * threads : dim3(threads) * flag : 0. Random * 1. Horizontal * 2. 
Not Horizontal */ void cuApplyHorizontal(double **inputs, double**outputs, int batch, int ImgSize, int flag) { int threads = ::min(ImgSize * ImgSize / 2, 512); hipLaunchKernelGGL(( g_applyHorizontal), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threads)), 0, 0, inputs, outputs, cu_d_randomNum, ImgSize, flag); hipDeviceSynchronize(); getLastCudaError("g_applyHorizontal"); }
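For context, a hedged sketch of how this distortion API is meant to be driven. The three cu* entry points are defined above; the batch size, image size, seed, and the device arrays of per-image pointers (d_in, d_out) are made-up placeholders:

#include <ctime>

// Hypothetical driver: allocate once, then draw fresh displacement fields
// and warp each incoming batch. d_in/d_out match the double** arguments
// taken by the kernels above (one device pointer per image).
void augmentEpochSketch(double** d_in, double** d_out, int numBatches) {
  const int batch = 128, ImgSize = 28;     // placeholder sizes
  cuInitDistortionMemery(batch, ImgSize);  // one-time buffers + RNG setup
  for (int b = 0; b < numBatches; ++b) {
    cuApplyRandom(batch, (unsigned long long)time(0) + b, ImgSize);
    cuApplyDistortion(d_in, d_out, batch, ImgSize);
  }
}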
79afba0c8527f1b5668791e7295b89ee17c28027.cu
#include "cuTrasformation.cuh" #include <math.h> #include <stdio.h> #include "cuMatrix.h" #include "util.h" #include "cuda_runtime.h" #include <curand_kernel.h> #include <time.h> #include "Config.h" #include <helper_functions.h> #include <helper_cuda.h> #define GAUSSIAN_FIELD_SIZE (21) /* strictly odd number */ curandGenerator_t rand_generator_device; const curandRngType_t generator_type = CURAND_RNG_PSEUDO_DEFAULT; cuMatrix<double>* cuGaussianKernel; cuMatrix<double>* cuDispH; cuMatrix<double>* cuDispV; float * cu_d_randonNumf; double* cu_d_randomNum; double* cu_h_randomNum; double dElasticSigma = 4.0; /* higher numbers are more smooth and less distorted; Simard uses 4.0*/ int getRandomNumLen(int batch, int ImgSize) { return batch * ImgSize * ImgSize * 2; } /* * blocks : dim3(1) * threads: dim3(GAUSSIAN_FIELD_SIZE*GAUSSIAN_FIELD_SIZE) */ __global__ void g_createGaussianKernel(double* gaussian, double dElasticSigma, int ImgSize) { int iiMid = GAUSSIAN_FIELD_SIZE >> 1; double doubleElasticSigma = dElasticSigma * dElasticSigma; int row = threadIdx.x % ImgSize; int col = threadIdx.x / ImgSize; double val1 = 1.0 / (dElasticSigma * 2.0 * 3.1415926535897932384626433832795); double val2 = (row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid); gaussian[threadIdx.x] = val1 * exp(-1.0 * val2 / (2.0 * doubleElasticSigma)); } void cuInitDistortionMemery(int batch, int ImgSize) { curandStatus_t curandstatus; cuGaussianKernel = new cuMatrix<double>(GAUSSIAN_FIELD_SIZE, GAUSSIAN_FIELD_SIZE, 1); if(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE > MAX_THREADS) { printf("g_createGaussianKernel > MAX_THREADS\n"); exit(0); } g_createGaussianKernel<<<dim3(1),dim3(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE)>>>( cuGaussianKernel->devData, dElasticSigma, ImgSize); cudaDeviceSynchronize(); /*cu_d_randomNum*/ checkCudaErrors(cudaMalloc((void**)&cu_d_randomNum, sizeof(double) * getRandomNumLen(batch, ImgSize))); /*cu_d_randonNumf*/ checkCudaErrors(cudaMalloc((void**)&cu_d_randonNumf, sizeof(float) * getRandomNumLen(batch, ImgSize))); /*cu_h_randomNum*/ cu_h_randomNum = (double*)malloc(sizeof(double) * getRandomNumLen(batch, ImgSize)); if(!cu_h_randomNum) { printf("malloc cu_h_randomNum fail\n"); exit(0); } /*curandCreateGenerator*/ curandstatus = curandCreateGenerator(&rand_generator_device, generator_type); if(curandstatus != CURAND_STATUS_SUCCESS) { printf("curandCreateGenerator fail\n"); exit(0); } cuDispV = new cuMatrix<double>(batch, ImgSize * ImgSize, 1); cuDispH = new cuMatrix<double>(batch, ImgSize * ImgSize, 1); } __global__ void g_getRandomUniform(float* r1, double* r2, int len) { for(int i = 0; i < len; i += gridDim.x * blockDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { r2[id] = r1[id] * 2.0f - 1.0f; } } } /* * blocks : dim3(batch) * threads : dim3(512) */ __global__ void g_generateDistortionMap( double* _dispH, double* _dispV, double* rand, double* gaussianKernel, double dElasticScaling, double dMaxScaling, double dMaxRotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; double* uniformH = rand + blockIdx.x * ImgSize2; double* uniformV = rand + blockIdx.x * ImgSize2 * 2; double* dispH = _dispH + ImgSize2 * blockIdx.x; double* dispV = _dispV + ImgSize2 * blockIdx.x; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; int iiMid = GAUSSIAN_FIELD_SIZE / 2; double fConvolvedH = 0.0; double fConvolvedV = 0.0; double fSampleH, fSampleV; double elasticScale = dElasticScaling; for(int xxx = 
0; xxx < GAUSSIAN_FIELD_SIZE; ++xxx) { for(int yyy = 0; yyy < GAUSSIAN_FIELD_SIZE; ++yyy) { int xxxDisp = col - iiMid + xxx; int yyyDisp = row - iiMid + yyy; if(xxxDisp < 0 || xxxDisp >= ImgSize || yyyDisp < 0 || yyyDisp >= ImgSize) { fSampleH = 0.0; fSampleV = 0.0; } else { fSampleH = uniformH[yyyDisp * ImgSize + xxxDisp]; fSampleV = uniformV[yyyDisp * ImgSize + xxxDisp]; } fConvolvedH += fSampleH * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx]; fConvolvedV += fSampleV * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx]; } } dispH[idx] = elasticScale * fConvolvedH; dispV[idx] = elasticScale * fConvolvedV; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double dSFHoriz = dMaxScaling / 100.0 * rand[blockIdx.x]; double dSFVert = dMaxScaling / 100.0 * rand[blockIdx.x + 1]; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double angle = dMaxRotation * rand[blockIdx.x]; angle = angle * 3.1415926535897932384626433832795 / 180.0; double cosAngle = cos(angle); double sinAngle = sin(angle); int iMid = ImgSize / 2; double xx = row - iMid; double yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } /* thread allocation: dim3(batch), dim3(ImgSize, ImgSize) */ __global__ void g_scaleAndRotate( double* _dispH, double* _dispV, double scaling, double rotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; double* dispH = _dispH + ImgSize2 * blockIdx.x; double* dispV = _dispV + ImgSize2 * blockIdx.x; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { dispH[idx] = 0.0; dispV[idx] = 0.0; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double dSFHoriz = scaling / 100.0; double dSFVert = scaling / 100.0; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double angle = rotation; angle = angle * 3.1415926535897932384626433832795 / 180.0; double cosAngle = cos(angle); double sinAngle = sin(angle); int iMid = ImgSize / 2; double xx = row - iMid; double yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } /* * blocks : dim3(batch, Config::instance()->getChannels()) * threads: dim3(min(512, ImgSize * ImgSize)) */ __global__ void g_applyDistortionMap( double** _inputs, double** _outputs, double* _dispH, double* _dispV, int ImgSize) { int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize; double* input = _inputs[blockIdx.x] + ImgSize2 * c; double* output= _outputs[blockIdx.x]+ ImgSize2 * c; double* dispV = _dispV + blockIdx.x * ImgSize2; double* dispH = _dispH + blockIdx.x * ImgSize2; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; double sourceRow, sourceCol; double fracRow, fracCol; double w1, w2, w3, w4; double sourceValue; int sRow, sCol, sRowp1, sColp1; bool 
bSkipOutOfBounds; if(fabs(dispV[idx]) < 0.000000001 && fabs(dispH[idx]) < 0.0000000001) { output[idx] = input[idx]; continue; } sourceRow = (double)row - dispV[idx]; sourceCol = (double)col - dispH[idx]; fracRow = sourceRow - (int)sourceRow; fracCol = sourceCol - (int)sourceCol; w1 = ( 1.0 - fracRow ) * ( 1.0 - fracCol ); w2 = ( 1.0 - fracRow ) * fracCol; w3 = fracRow * ( 1.0 - fracCol ); w4 = fracRow * fracCol; bSkipOutOfBounds = false; if ( ((int)sourceRow + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceRow < 0 ) bSkipOutOfBounds = true; if ( ((int)sourceCol + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceCol < 0 ) bSkipOutOfBounds = true; if ( bSkipOutOfBounds == false ) { sRow = (int)sourceRow; sCol = (int)sourceCol; sRowp1 = sRow + 1; sColp1 = sCol + 1; while (sRowp1 >= ImgSize) sRowp1 -= ImgSize; while (sRowp1 < 0) sRowp1 += ImgSize; while (sColp1 >= ImgSize) sColp1 -= ImgSize; while (sColp1 < 0) sColp1 += ImgSize; while (sRow >= ImgSize) sRow -= ImgSize; while (sRow < 0) sRow += ImgSize; while (sCol >= ImgSize) sCol -= ImgSize; while (sCol < 0) sCol += ImgSize; sourceValue = w1 * input[sRow * ImgSize + sCol] + w2 * input[sRow * ImgSize + sColp1] + w3 * input[sRowp1 * ImgSize + sCol] + w4 * input[sRowp1 * ImgSize + sColp1]; } else { sourceValue = -1.0; } output[idx] = sourceValue; } } } void cuApplyRandom(int batch, unsigned long long s, int ImgSize) { curandStatus_t curandStatus; unsigned long long seed = s; curandStatus = curandSetPseudoRandomGeneratorSeed(rand_generator_device, seed); if(curandStatus != CURAND_STATUS_SUCCESS) { printf("curandSetPseudoRandomGeneratorSeed fail\n"); exit(0); } curandGenerateUniform(rand_generator_device, cu_d_randonNumf, getRandomNumLen(batch, ImgSize)); g_getRandomUniform<<<dim3(256),dim3(256)>>>(cu_d_randonNumf, cu_d_randomNum, getRandomNumLen(batch, ImgSize)); cudaDeviceSynchronize(); getLastCudaError("g_getRandomUniform"); int threads = min(512, ImgSize * ImgSize); g_generateDistortionMap<<<dim3(batch),threads>>>(cuDispH->devData, cuDispV->devData, cu_d_randomNum, cuGaussianKernel->devData, Config::instance()->getDistortion(), Config::instance()->getScale(), Config::instance()->getRotation(), ImgSize); cudaDeviceSynchronize(); getLastCudaError("g_generateDistortionMap"); } void cuApplyScaleAndRotate(int batch, int ImgSize, double scaling, double rotation) { g_scaleAndRotate<<<dim3(batch),dim3(512)>>>( cuDispH->devData, cuDispV->devData, scaling, rotation, ImgSize); cudaDeviceSynchronize(); getLastCudaError("g_generateDistortionMap"); } void cuApplyDistortion(double**inputs, double**outputs, int batch, int ImgSize) { int threadidx = min(ImgSize * ImgSize, 512); g_applyDistortionMap<<<dim3(batch, Config::instance()->getChannels()), dim3(threadidx)>>>(inputs, outputs, cuDispH->devData, cuDispV->devData, ImgSize); cudaDeviceSynchronize(); getLastCudaError("g_applyDistortionMap"); } /* * blocks : dim3(batch, channels) * threads : dim3(min(ImgSize*ImgSize, 512)) */ __global__ void g_applyCropRandom(double**_inputs, double**_outputs, double* random, int crop, int ImgSize) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + crop; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; double* input = _inputs [blockIdx.x] + c * inputImgSize2; double* output= _outputs[blockIdx.x]+ c * outputImgSize2; int sx =(int)((random[blockIdx.x] + 1.0) * 0.5 * crop); int sy =(int)((random[blockIdx.x + 1] + 1.0) * 0.5 * crop); if(sx > crop) sx = crop; if(sy > crop) sy = crop; 
if(sx < 0) sx = 0; if(sy < 0) sy = 0; for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } /* * blocks : dim3(batch, channels) * threads: dim3(min(ImgSize * ImgSize, 512)) */ __global__ void g_applyCrop(double**_inputs, double**_outputs, double* random, int croplen, int ImgSize, int cropr, int cropc) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + croplen; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; double* input = _inputs [blockIdx.x]+ c * inputImgSize2 ; double* output= _outputs[blockIdx.x]+ c * outputImgSize2; int sx = cropr; int sy = cropc; for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } void cuApplyCropRandom(double**inputs, double**outputs, int batch, int ImgSize) { int threads = min(512, ImgSize * ImgSize); g_applyCropRandom<<<dim3(batch, Config::instance()->getChannels()), dim3(threads)>>>(inputs, outputs, cu_d_randomNum, Config::instance()->getCrop(), ImgSize); cudaDeviceSynchronize(); getLastCudaError("g_applyCropRandom"); } void cuApplyCrop(double**inputs, double**outputs, int batch, int ImgSize, int cropr, int cropc) { int threads = min(512, ImgSize * ImgSize); g_applyCrop<<<dim3(batch, Config::instance()->getChannels()), dim3(threads)>>>(inputs, outputs,cu_d_randomNum, Config::instance()->getCrop(), ImgSize, cropr, cropc); cudaDeviceSynchronize(); getLastCudaError("g_applyCrop"); } __device__ void swap(double& val1, double& val2){ double tmp = val1; val1 = val2; val2 = tmp; } /* * function: Horizontal reflection * blocks : dim3(batch, Config::instance()->getChannels()), * threads : dim3(threads) * flag : 0. Random * 1. Horizontal * 2. Not Horizontal */ __global__ void g_applyHorizontal(double**_inputs, double**_outputs, double* rand, int ImgSize, int flag) { int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize ; double* input = _inputs[blockIdx.x] + c * ImgSize2; double* output= _outputs[blockIdx.x]+ c * ImgSize2; int half = ImgSize / 2; for(int is = 0; is < half * ImgSize; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < half * ImgSize) { int ox = idx / half; int oy = idx % half; int ix = ox; int iy = ImgSize - oy - 1; if(flag == RANDOM_HORIZONTAL) { if(rand[blockIdx.x] <= 0.0){ cuAssert(ix < ImgSize && iy < ImgSize); swap(output[ox * ImgSize + iy], input[ix * ImgSize + iy]); } } else if(flag == HORIZONTAL){ cuAssert(ix < ImgSize && iy < ImgSize); swap(output[ox * ImgSize + iy], input[ix * ImgSize + iy]); } else if(flag == NOT_HORIZONTAL){ } } } } /* * flag : 0. Random * 1. Horizontal * 2. Not Horizontal */ void cuApplyHorizontal(double **inputs, double**outputs, int batch, int ImgSize, int flag) { int threads = std::min(ImgSize * ImgSize / 2, 512); g_applyHorizontal<<<dim3(batch, Config::instance()->getChannels()), dim3(threads)>>>(inputs, outputs, cu_d_randomNum, ImgSize, flag); cudaDeviceSynchronize(); getLastCudaError("g_applyHorizontal"); }
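A scalar reference for the bilinear sample that g_applyDistortionMap computes above, using the same weights w1..w4 and the same out-of-bounds convention (the kernel writes -1.0 when the displaced source coordinate falls outside the image); the helper name is illustrative:

// Host-side equivalent of one interpolated read in g_applyDistortionMap.
double bilinearSampleRef(const double* img, int ImgSize,
                         double srcRow, double srcCol) {
  const int r0 = (int)srcRow, c0 = (int)srcCol;
  if (r0 < 0 || c0 < 0 || r0 + 1 >= ImgSize || c0 + 1 >= ImgSize)
    return -1.0;  // bSkipOutOfBounds path above
  const double fr = srcRow - r0, fc = srcCol - c0;
  return (1.0 - fr) * (1.0 - fc) * img[r0 * ImgSize + c0]            // w1
       + (1.0 - fr) * fc         * img[r0 * ImgSize + c0 + 1]        // w2
       + fr * (1.0 - fc)         * img[(r0 + 1) * ImgSize + c0]      // w3
       + fr * fc                 * img[(r0 + 1) * ImgSize + c0 + 1]; // w4
}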
755d8183b64f6004e1f5b1e91240d9ba299a4b24.hip
// !!! This is a file automatically generated by hipify!!! #ifdef USE_CUDNN #include <algorithm> #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/cudnn_conv_layer.hpp" #include "caffe/net.hpp" #include "caffe/solver.hpp" namespace caffe { template<typename Ftype, typename Btype> void CuDNNConvolutionLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const Ftype* weight = this->blobs_[0]->template gpu_data<Ftype>(); for (int i = 0; i < bottom.size(); ++i) { const Ftype* bottom_data = bottom[i]->gpu_data<Ftype>(); Ftype* top_data = top[i]->mutable_gpu_data<Ftype>(); // Forward through cuDNN in parallel over groups. const size_t gsize = workspace_.size() / groups(); CHECK(is_even(gsize)); for (int g = 0; g < this->group_; ++g) { unsigned char* pspace = static_cast<unsigned char*>(workspace_.data()) + gsize * idxg(g); // Filters. CUDNN_CHECK(cudnnConvolutionForward(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Ftype>::one, fwd_bottom_descs_[i], bottom_data + bottom_offset_ * g, fwd_filter_desc_, weight + this->weight_offset_ * g, fwd_conv_descs_[i], fwd_algo_[i], pspace, gsize, cudnn::dataType<Ftype>::zero, fwd_top_descs_[i], top_data + top_offset_ * g)); } // NOLINT_NEXT_LINE(whitespace/operators) for (int ig = 0; ig < groups(); ++ig) { CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(ig))); } if (this->bias_term_) { const Ftype* bias_data = this->blobs_[1]->template gpu_data<Ftype>(); for (int g = 0; g < this->group_; ++g) { CUDNN_CHECK(cudnnAddTensor(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Ftype>::one, fwd_bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Ftype>::one, fwd_top_descs_[i], top_data + top_offset_ * g)); } // Synchronize the work across groups, each of which went into its own stream // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(g))); } } } // end of for i Solver* psolver = this->parent_solver(); if (psolver == nullptr || psolver->is_iter_size_complete()) { // Possibly use faster algorithms by allowing larger workspace. 
use_modest_workspace_ = false; } } template <typename Ftype, typename Btype> void CuDNNConvolutionLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top, const vector<bool>& propagate_down, const vector<Blob*>& bottom) { const size_t gsize = workspace_.size() / groups(); CHECK(is_even(gsize)); // compute dE/dB = sum_c(dE/dy) if (this->bias_term_ && this->param_propagate_down_[1]) { Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>(); for (int i = 0; i < top.size(); ++i) { Btype* top_diff = top[i]->mutable_gpu_diff<Btype>(); // in parallel over groups for (int g = 0; g < this->group_; ++g) { CUDNN_CHECK(cudnnConvolutionBackwardBias(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Btype>::one, bwd_top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Btype>::one, bwd_bias_desc_, bias_diff + bias_offset_ * g)); } // end of groups // Synchronize the work across groups, each of which went into its own stream // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(g))); } } // end of i } // end of dB // compute dE/dW = dY * X if (this->param_propagate_down_[0]) { Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>(); for (int i = 0; i < top.size(); ++i) { Btype* top_diff = top[i]->mutable_gpu_diff<Btype>(); const Btype* bottom_data = bottom[i]->gpu_data<Btype>(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; ++g) { unsigned char* pspace = static_cast<unsigned char*>(workspace_.data()) + gsize * idxg(g); // Gradient w.r.t. weights. CUDNN_CHECK(cudnnConvolutionBackwardFilter(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Btype>::one, bwd_bottom_descs_[i], bottom_data + bottom_offset_ * g, bwd_top_descs_[i], top_diff + top_offset_ * g, bwd_conv_filter_descs_[i], bwd_filter_algo_[i], pspace, gsize, cudnn::dataType<Btype>::one, bwd_filter_desc_, weight_diff + this->weight_offset_ * g)); } // end of groups // Synchronize the work across groups, each of which went into its own stream // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(g))); } } // end of i } // Backward propagate grad wrt bottom data dE/dX= dE/dY * W const Btype* weight = this->blobs_[0]->template gpu_data<Btype>(); for (int i = 0; i < top.size(); ++i) { if (propagate_down[i]) { // Backward in parallel over groups for (int g = 0; g < this->group_; ++g) { Btype* top_diff = top[i]->mutable_gpu_diff<Btype>(); Btype* bottom_diff = bottom[i]->mutable_gpu_diff<Btype>(); unsigned char* pspace = static_cast<unsigned char*>(workspace_.data()) + gsize * idxg(g); CUDNN_CHECK(cudnnConvolutionBackwardData(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Btype>::one, bwd_filter_desc_, weight + this->weight_offset_ * g, bwd_top_descs_[i], top_diff + top_offset_ * g, bwd_conv_data_descs_[i], bwd_data_algo_[i], pspace, gsize, cudnn::dataType<Btype>::zero, bwd_bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } // Synchronize the work across groups. // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(g))); } } // end if propagate down } // end for i } INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNConvolutionLayer); } // namespace caffe #endif
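Both passes above slice one shared workspace evenly across convolution groups and offset every data, weight, and bias pointer by a fixed per-group stride before each cuDNN call; each group then runs on its own stream (Caffe::thread_stream), which is why every group loop is followed by a synchronize loop. A small sketch of that indexing, detached from cuDNN; all names are illustrative and it assumes idxg(g) == g:

// Offsets handed to the cuDNN calls above for group g.
struct GroupSlices {
  size_t workspace_bytes;          // gsize * idxg(g): private scratch
  long bottom, top, weight, bias;  // element offsets into the blobs
};

GroupSlices slicesForGroup(int g, size_t workspace_total, int groups,
                           long bottom_offset, long top_offset,
                           long weight_offset, long bias_offset) {
  GroupSlices s;
  s.workspace_bytes = (workspace_total / groups) * g;  // pspace above
  s.bottom = bottom_offset * g;  // bottom_data + bottom_offset_ * g
  s.top    = top_offset * g;     // top_data    + top_offset_ * g
  s.weight = weight_offset * g;  // weight      + weight_offset_ * g
  s.bias   = bias_offset * g;    // bias_data   + bias_offset_ * g
  return s;
}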
755d8183b64f6004e1f5b1e91240d9ba299a4b24.cu
#ifdef USE_CUDNN #include <algorithm> #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/cudnn_conv_layer.hpp" #include "caffe/net.hpp" #include "caffe/solver.hpp" namespace caffe { template<typename Ftype, typename Btype> void CuDNNConvolutionLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const Ftype* weight = this->blobs_[0]->template gpu_data<Ftype>(); for (int i = 0; i < bottom.size(); ++i) { const Ftype* bottom_data = bottom[i]->gpu_data<Ftype>(); Ftype* top_data = top[i]->mutable_gpu_data<Ftype>(); // Forward through cuDNN in parallel over groups. const size_t gsize = workspace_.size() / groups(); CHECK(is_even(gsize)); for (int g = 0; g < this->group_; ++g) { unsigned char* pspace = static_cast<unsigned char*>(workspace_.data()) + gsize * idxg(g); // Filters. CUDNN_CHECK(cudnnConvolutionForward(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Ftype>::one, fwd_bottom_descs_[i], bottom_data + bottom_offset_ * g, fwd_filter_desc_, weight + this->weight_offset_ * g, fwd_conv_descs_[i], fwd_algo_[i], pspace, gsize, cudnn::dataType<Ftype>::zero, fwd_top_descs_[i], top_data + top_offset_ * g)); } // NOLINT_NEXT_LINE(whitespace/operators) for (int ig = 0; ig < groups(); ++ig) { CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(ig))); } if (this->bias_term_) { const Ftype* bias_data = this->blobs_[1]->template gpu_data<Ftype>(); for (int g = 0; g < this->group_; ++g) { CUDNN_CHECK(cudnnAddTensor(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Ftype>::one, fwd_bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Ftype>::one, fwd_top_descs_[i], top_data + top_offset_ * g)); } // Synchronize the work across groups, each of which went into its own stream // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(g))); } } } // end of for i Solver* psolver = this->parent_solver(); if (psolver == nullptr || psolver->is_iter_size_complete()) { // Possibly use faster algorithms by allowing larger workspace. use_modest_workspace_ = false; } } template <typename Ftype, typename Btype> void CuDNNConvolutionLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top, const vector<bool>& propagate_down, const vector<Blob*>& bottom) { const size_t gsize = workspace_.size() / groups(); CHECK(is_even(gsize)); // compute dE/dB = sum_c(dE/dy) if (this->bias_term_ && this->param_propagate_down_[1]) { Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>(); for (int i = 0; i < top.size(); ++i) { Btype* top_diff = top[i]->mutable_gpu_diff<Btype>(); // in parallel over groups for (int g = 0; g < this->group_; ++g) { CUDNN_CHECK(cudnnConvolutionBackwardBias(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Btype>::one, bwd_top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Btype>::one, bwd_bias_desc_, bias_diff + bias_offset_ * g)); } // end of groups // Synchronize the work across groups, each of which went into its own stream // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(g))); } } // end of i } // end of dB // compute dE/dW = dY * X if (this->param_propagate_down_[0]) { Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>(); for (int i = 0; i < top.size(); ++i) { Btype* top_diff = top[i]->mutable_gpu_diff<Btype>(); const Btype* bottom_data = bottom[i]->gpu_data<Btype>(); // Backward through cuDNN in parallel over groups and gradients. 
for (int g = 0; g < this->group_; ++g) { unsigned char* pspace = static_cast<unsigned char*>(workspace_.data()) + gsize * idxg(g); // Gradient w.r.t. weights. CUDNN_CHECK(cudnnConvolutionBackwardFilter(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Btype>::one, bwd_bottom_descs_[i], bottom_data + bottom_offset_ * g, bwd_top_descs_[i], top_diff + top_offset_ * g, bwd_conv_filter_descs_[i], bwd_filter_algo_[i], pspace, gsize, cudnn::dataType<Btype>::one, bwd_filter_desc_, weight_diff + this->weight_offset_ * g)); } // end of groups // Synchronize the work across groups, each of which went into its own stream // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(g))); } } // end of i } // Backward propagate grad wrt bottom data dE/dX= dE/dY * W const Btype* weight = this->blobs_[0]->template gpu_data<Btype>(); for (int i = 0; i < top.size(); ++i) { if (propagate_down[i]) { // Backward in parallel over groups for (int g = 0; g < this->group_; ++g) { Btype* top_diff = top[i]->mutable_gpu_diff<Btype>(); Btype* bottom_diff = bottom[i]->mutable_gpu_diff<Btype>(); unsigned char* pspace = static_cast<unsigned char*>(workspace_.data()) + gsize * idxg(g); CUDNN_CHECK(cudnnConvolutionBackwardData(Caffe::cudnn_handle(idxg(g)), cudnn::dataType<Btype>::one, bwd_filter_desc_, weight + this->weight_offset_ * g, bwd_top_descs_[i], top_diff + top_offset_ * g, bwd_conv_data_descs_[i], bwd_data_algo_[i], pspace, gsize, cudnn::dataType<Btype>::zero, bwd_bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } // Synchronize the work across groups. // NOLINT_NEXT_LINE(whitespace/operators) for (int g = 0; g < groups(); ++g) { CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(g))); } } // end if propagate down } // end for i } INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNConvolutionLayer); } // namespace caffe #endif
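// ----------------------------------------------------------------------------
// Editor's sketch -- not part of either file above. Both the forward and the
// backward passes slice a single workspace buffer into equal per-group scratch
// regions and run each group on its own stream/handle before synchronizing.
// A minimal standalone version of that pattern; workspaceBytes and nGroups are
// hypothetical stand-ins for the layer's workspace_.size() and groups():
#include <cstddef>
inline void dispatch_per_group(void* workspace, size_t workspaceBytes, int nGroups) {
    unsigned char* base = static_cast<unsigned char*>(workspace);
    const size_t gsize = workspaceBytes / nGroups;  // per-group scratch size
    for (int g = 0; g < nGroups; ++g) {
        unsigned char* pspace = base + gsize * g;   // group g's private region
        (void)pspace;  // ... enqueue group g's cuDNN call on stream g, passing pspace/gsize ...
    }
    // ... then synchronize each group's stream before the outputs are read.
}
// ----------------------------------------------------------------------------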
bd8210752c89a1aa4e9615d373598b866357c35c.hip
// !!! This is a file automatically generated by hipify!!! // ######################################################################## // Practical Course: GPU Programming in Computer Vision // Technical University of Munich, Computer Vision Group // ######################################################################## #include "structure_tensor.cuh" #include <iostream> #include <hip/hip_runtime.h> #include "helper.cuh" __device__ void computeEigenValuesOfMatrix2x2(float& lambda1, float& lambda2, const float& a, const float& b, const float& c, const float& d) { float determinant = a*d - b*c; float trace = a + d; lambda1 = trace/2 + powf(trace*trace/4 - determinant, 0.5f); lambda2 = trace/2 - powf(trace*trace/4 - determinant, 0.5f); } __global__ void computeTensorOutputKernel(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // TODO (8.3) compute structure tensor output // alpha = 0.005 // beta = 0.001 if (nc != 3) return; if (x >= w || y >= h) return; // corner if (lmb2[y*w + x] >= lmb1[y*w + x] && lmb1[y*w + x] >= alpha) { imgOut[0*h*w + y*w + x] = 1.0f; imgOut[1*h*w + y*w + x] = 0.0f; imgOut[2*h*w + y*w + x] = 0.0f; } // edge else if (lmb1[y*w + x] <= beta && beta < alpha && alpha <= lmb2[y*w + x]) { imgOut[0*h*w + y*w + x] = 1.0f; imgOut[1*h*w + y*w + x] = 1.0f; imgOut[2*h*w + y*w + x] = 0.0f; } // otherwise else { imgOut[0*h*w + y*w + x] = imgIn[0*h*w + y*w + x] * 0.5f; imgOut[1*h*w + y*w + x] = imgIn[1*h*w + y*w + x] * 0.5f; imgOut[2*h*w + y*w + x] = imgIn[2*h*w + y*w + x] * 0.5f; } } __global__ void computeDetectorKernel(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // TODO (8.1) compute eigenvalues float lambda1; float lambda2; computeEigenValuesOfMatrix2x2(lambda1, lambda2, tensor11[y*w + x], tensor12[y*w + x], tensor12[y*w + x], tensor22[y*w + x]); // TODO (8.2) implement detector if (lambda1 <= lambda2) { lmb1[y*w + x] = lambda1; lmb2[y*w + x] = lambda2; } else { lmb1[y*w + x] = lambda2; lmb2[y*w + x] = lambda1; } } __global__ void computeStructureTensorKernel(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { // TODO (7.3) compute structure tensor int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; tensor11[y*w + x] = 0; tensor12[y*w + x] = 0; tensor22[y*w + x] = 0; for (int z = 0; z < nc; z++) { tensor11[y*w + x] += dx[z*h*w + y*w + x]*dx[z*h*w + y*w + x]; tensor12[y*w + x] += dx[z*h*w + y*w + x]*dy[z*h*w + y*w + x]; tensor22[y*w + x] += dy[z*h*w + y*w + x]*dy[z*h*w + y*w + x]; } } void computeTensorOutputCuda(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { // calculate block and grid size dim3 block(32, 32, 1); // TODO (8.3) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (8.3) execute kernel for computing tensor output hipLaunchKernelGGL(( computeTensorOutputKernel) , dim3(grid), dim3(block), 0, 0, imgOut, lmb1, lmb2, imgIn, w, h, nc, alpha, beta); // check for errors // TODO (8.3) CUDA_CHECK; } void computeDetectorCuda(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { // 
calculate block and grid size dim3 block(32, 32, 1); // TODO (8.2) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (8.2) execute kernel for detector hipLaunchKernelGGL(( computeDetectorKernel) , dim3(grid), dim3(block), 0, 0, lmb1, lmb2, tensor11, tensor12, tensor22, w, h); // check for errors // TODO (8.2) CUDA_CHECK; } void computeStructureTensorCuda(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 32, 1); // TODO (7.3) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (7.3) execute structure tensor kernel hipLaunchKernelGGL(( computeStructureTensorKernel) , dim3(grid), dim3(block), 0, 0, tensor11, tensor12, tensor22, dx, dy, w, h, nc); // check for errors // TODO (7.3) CUDA_CHECK; }
bd8210752c89a1aa4e9615d373598b866357c35c.cu
// ######################################################################## // Practical Course: GPU Programming in Computer Vision // Technical University of Munich, Computer Vision Group // ######################################################################## #include "structure_tensor.cuh" #include <iostream> #include <cuda_runtime.h> #include "helper.cuh" __device__ void computeEigenValuesOfMatrix2x2(float& lambda1, float& lambda2, const float& a, const float& b, const float& c, const float& d) { float determinant = a*d - b*c; float trace = a + d; lambda1 = trace/2 + powf(trace*trace/4 - determinant, 0.5f); lambda2 = trace/2 - powf(trace*trace/4 - determinant, 0.5f); } __global__ void computeTensorOutputKernel(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // TODO (8.3) compute structure tensor output // alpha = 0.005 // beta = 0.001 if (nc != 3) return; if (x >= w || y >= h) return; // corner if (lmb2[y*w + x] >= lmb1[y*w + x] && lmb1[y*w + x] >= alpha) { imgOut[0*h*w + y*w + x] = 1.0f; imgOut[1*h*w + y*w + x] = 0.0f; imgOut[2*h*w + y*w + x] = 0.0f; } // edge else if (lmb1[y*w + x] <= beta && beta < alpha && alpha <= lmb2[y*w + x]) { imgOut[0*h*w + y*w + x] = 1.0f; imgOut[1*h*w + y*w + x] = 1.0f; imgOut[2*h*w + y*w + x] = 0.0f; } // otherwise else { imgOut[0*h*w + y*w + x] = imgIn[0*h*w + y*w + x] * 0.5f; imgOut[1*h*w + y*w + x] = imgIn[1*h*w + y*w + x] * 0.5f; imgOut[2*h*w + y*w + x] = imgIn[2*h*w + y*w + x] * 0.5f; } } __global__ void computeDetectorKernel(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; // TODO (8.1) compute eigenvalues float lambda1; float lambda2; computeEigenValuesOfMatrix2x2(lambda1, lambda2, tensor11[y*w + x], tensor12[y*w + x], tensor12[y*w + x], tensor22[y*w + x]); // TODO (8.2) implement detector if (lambda1 <= lambda2) { lmb1[y*w + x] = lambda1; lmb2[y*w + x] = lambda2; } else { lmb1[y*w + x] = lambda2; lmb2[y*w + x] = lambda1; } } __global__ void computeStructureTensorKernel(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { // TODO (7.3) compute structure tensor int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; tensor11[y*w + x] = 0; tensor12[y*w + x] = 0; tensor22[y*w + x] = 0; for (int z = 0; z < nc; z++) { tensor11[y*w + x] += dx[z*h*w + y*w + x]*dx[z*h*w + y*w + x]; tensor12[y*w + x] += dx[z*h*w + y*w + x]*dy[z*h*w + y*w + x]; tensor22[y*w + x] += dy[z*h*w + y*w + x]*dy[z*h*w + y*w + x]; } } void computeTensorOutputCuda(float *imgOut, const float *lmb1, const float *lmb2, const float *imgIn, int w, int h, int nc, float alpha, float beta) { // calculate block and grid size dim3 block(32, 32, 1); // TODO (8.3) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (8.3) execute kernel for computing tensor output computeTensorOutputKernel <<<grid, block>>> (imgOut, lmb1, lmb2, imgIn, w, h, nc, alpha, beta); // check for errors // TODO (8.3) CUDA_CHECK; } void computeDetectorCuda(float *lmb1, float *lmb2, const float *tensor11, const float *tensor12, const float *tensor22, int w, int h) { // calculate block and grid size dim3 block(32, 32, 1); // TODO (8.2) specify suitable block size dim3 
grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (8.2) execute kernel for detector computeDetectorKernel <<<grid, block>>> (lmb1, lmb2, tensor11, tensor12, tensor22, w, h); // check for errors // TODO (8.2) CUDA_CHECK; } void computeStructureTensorCuda(float *tensor11, float *tensor12, float *tensor22, const float *dx, const float *dy, int w, int h, int nc) { // calculate block and grid size dim3 block(32, 32, 1); // TODO (7.3) specify suitable block size dim3 grid = computeGrid2D(block, w, h); // run cuda kernel // TODO (7.3) execute structure tensor kernel computeStructureTensorKernel <<<grid, block>>> (tensor11, tensor12, tensor22, dx, dy, w, h, nc); // check for errors // TODO (7.3) CUDA_CHECK; }
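// Editor's note (derivation, not in the files above): for a 2x2 matrix
// [a b; c d] the characteristic polynomial is lambda^2 - T*lambda + D = 0 with
// T = a + d and D = a*d - b*c, so lambda_{1,2} = T/2 +- sqrt(T^2/4 - D) --
// exactly what computeEigenValuesOfMatrix2x2 evaluates via powf(..., 0.5f).
// For the symmetric structure tensor (b == c) the discriminant equals
// ((a - d)/2)^2 + b^2 >= 0, so the square root is always real.
// Quick check with hypothetical values: a = 2, b = c = 1, d = 2 => T = 4,
// D = 3, lambda = 2 +- 1 => {3, 1}.
// Also worth noting: computeDetectorKernel and computeStructureTensorKernel
// write without an (x < w && y < h) guard, so if computeGrid2D rounds the
// grid up, the overshoot threads index out of bounds.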
19526d06f818cda8dbca25cb687d188936217b37.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); extern int image_N; float * dev_ref_luma; float * dev_ref_chroma_u; float * dev_ref_chroma_v; float* device_luma; float* device_chroma_u; float* device_chroma_v; unsigned char* device_source; __global__ void ntsc_encode_frame(int N, float* luma, float* chroma_u, float* chroma_v, unsigned char* source) { // compute overall index from position of thread in current block, // and given the block we are in int refIndex = blockIdx.x * blockDim.x + threadIdx.x; int line = blockIdx.x; int col = threadIdx.x; int imageOffset = (line * 514 + col) * 4; float im_R = source[imageOffset] / 256.0; float im_G = source[imageOffset+1] / 256.0; float im_B = source[imageOffset+2] / 256.0; float im_A = source[imageOffset+3] / 256.0; float im_Y = im_R * .299 + im_G * .587 + im_B * .114; float im_U = .492 * (im_B - im_Y); float im_V = .877 * (im_R - im_Y); //if (index > 1000 & index < 1100) printf("Index: %d\n, Block: %d, Dim: %d, Thread: %d", // index, blockIdx.x, blockDim.x, threadIdx.x); int arrIndex = 635 * line + 106 + col; luma[arrIndex] = (im_Y * 0.7) + 0.3; chroma_u[arrIndex] = im_U; chroma_v[arrIndex] = im_V; } void ntscCuda(int N, float* luma, float* chroma_u, float* chroma_v, unsigned char* source){ int totalBytes = sizeof(float) * N; int totalBytesUChar = sizeof(unsigned char) * image_N; // compute number of blocks and threads per block //const int threadsPerBlock = 512; //const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; const int blocks = ((N / 635) - 20); //242 TV Lines const int threadsPerBlock = 514; //514 'pixels' per line // start timing double startTime = CycleTimer::currentSeconds(); // run kernel hipLaunchKernelGGL(( ntsc_encode_frame), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, device_luma, device_chroma_u, device_chroma_v, device_source); hipDeviceSynchronize(); // copy result from GPU using hipMemcpy hipMemcpy(luma, device_luma, totalBytes, hipMemcpyDeviceToHost); hipMemcpy(chroma_u, device_chroma_u, totalBytes, hipMemcpyDeviceToHost); hipMemcpy(chroma_v, device_chroma_v, totalBytes, hipMemcpyDeviceToHost); // end timing after result has been copied back into host memory double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } void load_reference_arrays(float * luma, float * chroma_u, float * chroma_v, int n){ int totalBytes = n * sizeof(float); int totalBytesUChar = sizeof(unsigned char) * image_N; hipMalloc(&dev_ref_luma, totalBytes); hipMalloc(&dev_ref_chroma_u, totalBytes); hipMalloc(&dev_ref_chroma_v, totalBytes); hipMemcpy(dev_ref_luma, luma, totalBytes, hipMemcpyHostToDevice); hipMemcpy(dev_ref_chroma_u, chroma_u, totalBytes, hipMemcpyHostToDevice); hipMemcpy(dev_ref_chroma_v, chroma_v, totalBytes, hipMemcpyHostToDevice); // // TODO allocate device memory buffers on the GPU using hipMalloc // hipMalloc(&device_luma, totalBytes); hipMalloc(&device_chroma_u, totalBytes); hipMalloc(&device_chroma_v, totalBytes); hipMalloc(&device_source, totalBytesUChar); } void clear_reference_arrays(){ hipFree(dev_ref_luma); 
hipFree(dev_ref_chroma_u); hipFree(dev_ref_chroma_v); // TODO free memory buffers on the GPU hipFree(device_luma); hipFree(device_chroma_u); hipFree(device_chroma_v); hipFree(device_source); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
19526d06f818cda8dbca25cb687d188936217b37.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); extern int image_N; float * dev_ref_luma; float * dev_ref_chroma_u; float * dev_ref_chroma_v; float* device_luma; float* device_chroma_u; float* device_chroma_v; unsigned char* device_source; __global__ void ntsc_encode_frame(int N, float* luma, float* chroma_u, float* chroma_v, unsigned char* source) { // compute overall index from position of thread in current block, // and given the block we are in int refIndex = blockIdx.x * blockDim.x + threadIdx.x; int line = blockIdx.x; int col = threadIdx.x; int imageOffset = (line * 514 + col) * 4; float im_R = source[imageOffset] / 256.0; float im_G = source[imageOffset+1] / 256.0; float im_B = source[imageOffset+2] / 256.0; float im_A = source[imageOffset+3] / 256.0; float im_Y = im_R * .299 + im_G * .587 + im_B * .114; float im_U = .492 * (im_B - im_Y); float im_V = .877 * (im_R - im_Y); //if (index > 1000 & index < 1100) printf("Index: %d\n, Block: %d, Dim: %d, Thread: %d", // index, blockIdx.x, blockDim.x, threadIdx.x); int arrIndex = 635 * line + 106 + col; luma[arrIndex] = (im_Y * 0.7) + 0.3; chroma_u[arrIndex] = im_U; chroma_v[arrIndex] = im_V; } void ntscCuda(int N, float* luma, float* chroma_u, float* chroma_v, unsigned char* source){ int totalBytes = sizeof(float) * N; int totalBytesUChar = sizeof(unsigned char) * image_N; // compute number of blocks and threads per block //const int threadsPerBlock = 512; //const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; const int blocks = ((N / 635) - 20); //242 TV Lines const int threadsPerBlock = 514; //514 'pixels' per line // start timing double startTime = CycleTimer::currentSeconds(); // run kernel ntsc_encode_frame<<<blocks, threadsPerBlock>>>(N, device_luma, device_chroma_u, device_chroma_v, device_source); cudaThreadSynchronize(); // copy result from GPU using cudaMemcpy cudaMemcpy(luma, device_luma, totalBytes, cudaMemcpyDeviceToHost); cudaMemcpy(chroma_u, device_chroma_u, totalBytes, cudaMemcpyDeviceToHost); cudaMemcpy(chroma_v, device_chroma_v, totalBytes, cudaMemcpyDeviceToHost); // end timing after result has been copied back into host memory double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); } void load_reference_arrays(float * luma, float * chroma_u, float * chroma_v, int n){ int totalBytes = n * sizeof(float); int totalBytesUChar = sizeof(unsigned char) * image_N; cudaMalloc(&dev_ref_luma, totalBytes); cudaMalloc(&dev_ref_chroma_u, totalBytes); cudaMalloc(&dev_ref_chroma_v, totalBytes); cudaMemcpy(dev_ref_luma, luma, totalBytes, cudaMemcpyHostToDevice); cudaMemcpy(dev_ref_chroma_u, chroma_u, totalBytes, cudaMemcpyHostToDevice); cudaMemcpy(dev_ref_chroma_v, chroma_v, totalBytes, cudaMemcpyHostToDevice); // // TODO allocate device memory buffers on the GPU using cudaMalloc // cudaMalloc(&device_luma, totalBytes); cudaMalloc(&device_chroma_u, totalBytes); cudaMalloc(&device_chroma_v, totalBytes); cudaMalloc(&device_source, totalBytesUChar); } void clear_reference_arrays(){ cudaFree(dev_ref_luma); cudaFree(dev_ref_chroma_u); cudaFree(dev_ref_chroma_v); // TODO free memory buffers on the GPU 
cudaFree(device_luma); cudaFree(device_chroma_u); cudaFree(device_chroma_v); cudaFree(device_source); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
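// Editor's note, not in the files above: the launch geometry is one block per
// TV line and one thread per sample, so inside the kernel line = blockIdx.x
// and col = threadIdx.x, and each output sample lands at 635*line + 106 + col,
// i.e. a 635-sample scanline whose active region starts 106 samples in.
// Restating the file's own numbers:
//   blocks          = (N / 635) - 20;  // 242 active TV lines
//   threadsPerBlock = 514;             // samples per line (<= 1024, so legal)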
2727360ecebf7429189246c2391b33a7d81d5726.hip
// !!! This is a file automatically generated by hipify!!! #include "common_hip.cuh" #include "global_variables.cuh" #include "utilities.cuh" #include <conio.h> #include <chrono> int fileReader(); int initialize(); int mainLoop(); void multiprocessor_writer(); int main(void) { float save_count = 0; std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); std::cout << "INITIALIZING...\n"; int rdr = fileReader(); if (rdr != 0) { printf("\n\nERROR READING PROPS FILES\n\n"); _getch(); return 1; } int init = initialize(); std::chrono::high_resolution_clock::time_point init_end = std::chrono::high_resolution_clock::now(); auto init_time = std::chrono::duration_cast<std::chrono::seconds>(init_end - start).count(); std::cout << "It took " << init_time << " s to initialize\n" << "----------------------------------------------------------------\n\n"; if (init != 0) { printf("\n\nINITIALIZATION ERROR\n\n"); _getch(); return 1; } std::cout << "MAIN LOOP:\n" << "Progress:" << std::endl; while (simulation_time < final_time) { displayProgress(start); int main_loop = mainLoop(); if (main_loop != 0) { printf("\n\nMAIN LOOP ERROR\n\n"); _getch(); return 1; } save_count += delta_t; if (save_count > save_steps / 1000) { multiprocessor_writer(); save_count = fmod(simulation_time,(save_steps / 1000)); } } std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); hipDeviceReset(); _getch(); return 0; }
2727360ecebf7429189246c2391b33a7d81d5726.cu
#include "common.cuh" #include "global_variables.cuh" #include "utilities.cuh" #include <conio.h> #include <chrono> int fileReader(); int initialize(); int mainLoop(); void multiprocessor_writer(); int main(void) { float save_count = 0; std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); std::cout << "INITIALIZING...\n"; int rdr = fileReader(); if (rdr != 0) { printf("\n\nERROR READING PROPS FILES\n\n"); _getch(); return 1; } int init = initialize(); std::chrono::high_resolution_clock::time_point init_end = std::chrono::high_resolution_clock::now(); auto init_time = std::chrono::duration_cast<std::chrono::seconds>(init_end - start).count(); std::cout << "It took " << init_time << " s to initialize\n" << "----------------------------------------------------------------\n\n"; if (init != 0) { printf("\n\nINITIALIZATION ERROR\n\n"); _getch(); return 1; } std::cout << "MAIN LOOP:\n" << "Progress:" << std::endl; while (simulation_time < final_time) { displayProgress(start); int main_loop = mainLoop(); if (main_loop != 0) { printf("\n\nMAIN LOOP ERROR\n\n"); _getch(); return 1; } save_count += delta_t; if (save_count > save_steps / 1000) { multiprocessor_writer(); save_count = fmod(simulation_time,(save_steps / 1000)); } } std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now(); cudaDeviceReset(); _getch(); return 0; }
5fd428813c82131becd942bb436756a7b26eac32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if (tid < len) { v[tid] = momentum * v[tid] + dw[tid]; w[tid] -= learning_rate * v[tid]; } }
5fd428813c82131becd942bb436756a7b26eac32.cu
#include "includes.h" extern "C" { } __global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) { int tid = blockIdx.x*blockDim.x + threadIdx.x; if (tid < len) { v[tid] = momentum * v[tid] + dw[tid]; w[tid] -= learning_rate * v[tid]; } }
52b91db0f973993cefec26fbaeebe96629216fc0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /// ================================================================ /// /// Disclaimer: IMPORTANT: This software was developed at theNT /// National Institute of Standards and Technology by employees of the /// Federal Government in the course of their official duties. /// Pursuant to title 17 Section 105 of the United States Code this /// software is not subject to copyright protection and is in the /// public domain. This is an experimental system. NIST assumes no /// responsibility whatsoever for its use by other parties, and makes /// no guarantees, expressed or implied, about its quality, /// reliability, or any other characteristic. We would appreciate /// acknowledgement if the software is used. This software can be /// redistributed and/or modified freely provided that any derivative /// works bear some notice that they are derived from it, and any /// modified versions bear some notice that they have been modified. /// /// ================================================================ // ================================================================ // // Author: Timothy Blattner // Date: Wed Nov 30 12:36:40 2011 EScufftDoubleComplex // // Functions that execute on the graphics card for doing // Vector computation. // // ================================================================ #define THREADS_PER_BLOCK 256 #define MIN_DISTANCE 1.0 // ================================================================ __global__ void reduce_max_mainf(float *g_idata, float *g_odata, int * max_idx, unsigned int n, int blockSize) { __shared__ float sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize) + tid; unsigned int gridSize = blockSize*gridDim.x; float myMax = 0.0; int myMaxIndex; float val; while (i < n) { val = g_idata[i]; if (myMax < val) { myMax = val; myMaxIndex = i; } if (i+blockSize < n) { val = g_idata[i+blockSize]; if (myMax < val) { myMax = val; myMaxIndex = i+blockSize; } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); } volatile float *vdata = sdata; volatile int *vidxData = idxData; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } __syncthreads(); } if (tid == 0) { 
g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; } }
52b91db0f973993cefec26fbaeebe96629216fc0.cu
#include "includes.h" /// ================================================================ /// /// Disclaimer: IMPORTANT: This software was developed at theNT /// National Institute of Standards and Technology by employees of the /// Federal Government in the course of their official duties. /// Pursuant to title 17 Section 105 of the United States Code this /// software is not subject to copyright protection and is in the /// public domain. This is an experimental system. NIST assumes no /// responsibility whatsoever for its use by other parties, and makes /// no guarantees, expressed or implied, about its quality, /// reliability, or any other characteristic. We would appreciate /// acknowledgement if the software is used. This software can be /// redistributed and/or modified freely provided that any derivative /// works bear some notice that they are derived from it, and any /// modified versions bear some notice that they have been modified. /// /// ================================================================ // ================================================================ // // Author: Timothy Blattner // Date: Wed Nov 30 12:36:40 2011 EScufftDoubleComplex // // Functions that execute on the graphics card for doing // Vector computation. // // ================================================================ #define THREADS_PER_BLOCK 256 #define MIN_DISTANCE 1.0 // ================================================================ __global__ void reduce_max_mainf(float *g_idata, float *g_odata, int * max_idx, unsigned int n, int blockSize) { __shared__ float sdata[THREADS_PER_BLOCK]; __shared__ int idxData[THREADS_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize) + tid; unsigned int gridSize = blockSize*gridDim.x; float myMax = 0.0; int myMaxIndex; float val; while (i < n) { val = g_idata[i]; if (myMax < val) { myMax = val; myMaxIndex = i; } if (i+blockSize < n) { val = g_idata[i+blockSize]; if (myMax < val) { myMax = val; myMaxIndex = i+blockSize; } } i += gridSize; } sdata[tid] = myMax; idxData[tid] = myMaxIndex; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { if (myMax < sdata[tid + 256]) { sdata[tid] = myMax = sdata[tid+256]; idxData[tid] = idxData[tid+256]; } } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { if (myMax < sdata[tid + 128]) { sdata[tid] = myMax = sdata[tid+128]; idxData[tid] = idxData[tid+128]; } } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { if(myMax < sdata[tid + 64]) { sdata[tid] = myMax = sdata[tid+64]; idxData[tid] = idxData[tid+64]; } } __syncthreads(); } volatile float *vdata = sdata; volatile int *vidxData = idxData; if (tid < 32) { if (blockSize >= 64) if (myMax < vdata[tid + 32]) { vdata[tid] = myMax = vdata[tid+32]; vidxData[tid] = vidxData[tid+32]; } if (blockSize >= 32) if (myMax < vdata[tid + 16]) { vdata[tid] = myMax = vdata[tid+16]; vidxData[tid] = vidxData[tid+16]; } if (blockSize >= 16) if (myMax < vdata[tid + 8]) { vdata[tid] = myMax = vdata[tid+8]; vidxData[tid] = vidxData[tid+8]; } if (blockSize >= 8) if (myMax < vdata[tid + 4]) { vdata[tid] = myMax = vdata[tid+4]; vidxData[tid] = vidxData[tid+4]; } if (blockSize >= 4) if (myMax < vdata[tid+2]) { vdata[tid] = myMax = vdata[tid+2]; vidxData[tid] = vidxData[tid+2]; } if (blockSize >= 2) if (myMax < vdata[tid + 1]) { vdata[tid] = myMax = vdata[tid+1]; vidxData[tid] = vidxData[tid+1]; } __syncthreads(); } if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; max_idx[blockIdx.x] = idxData[0]; } }
61423cf24da34a29a14664c3b6a56273caee7656.hip
// !!! This is a file automatically generated by hipify!!! /* ------------------------------------------------- File : BasicRoom.cu Author : Craig J. Webb Date : 18/03/12 Desc : 3D wave eq test ------------------------------------------------- */ // Set precision #include "blocks.h" #include "timing.h" #include "hip/hip_runtime_api.h" #define area (Nx*Ny) // Define Source and Read #define Sx 120 #define Sy 120 #define Sz 60 #define Nx 512 #define Ny 512 #define Nz 404 #define Rx 50 #define Ry 50 #define Rz 50 #define numberSamples 4410 #define dim 3 #define nano 1.e-09 #define pi 3.1415926535897932384626433832795 // kernel methods __global__ void UpdateRoom(double *u, const double* __restrict__ u1); __global__ void inout(double *u,double *out,double ins,int n); typedef struct { double l2; double loss1; double loss2; } coeffs_type; __constant__ coeffs_type cf_d[1]; // ---------------------------------------------- // some of Craig's definitions #define cuErr(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( hipError_t err, const char *file, const int line ){ if( hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } void checkLastCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "\nCuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } void printLastSamples(double *audio, int NF, int N) { int n; double maxy; // Test that N > 0 if(N<1){ printf("Must display 1 or more samples...\n"); } else{ //print last N samples printf("\n"); for(n=NF-N;n<NF;n++) { printf("Sample %d : %.20f\n",n,audio[n]); } // find max maxy = 0.0; for(n=0;n<NF;n++) { if(fabs(audio[n])>maxy) maxy = fabs(audio[n]); } printf("\nMax sample : %.20f\n",maxy); } } int main(){ // Simulation parameters double SR = 1000.0; int NF = 1000; double c = 344.0; double k = 1/SR; double h = sqrt(3.0)*c*k; double l2 = 1.0/3.0; double alpha = 0.005; // Boundary loss double lambda = c*k/h; coeffs_type cf_h[1]; cf_h[0].l2 = (c*c*k*k)/(h*h); cf_h[0].loss1 = 1.0/(1.0+lambda*alpha); cf_h[0].loss2 = 1.0-lambda*alpha; hipMemcpyToSymbol(cf_d,cf_h,sizeof(coeffs_type)) ; //------------------------------------------- // Initialise input int n; size_t pr_size = sizeof(double); int dur = 20; double *si_h = (double *)calloc(NF,pr_size); for(n=0;n<dur;n++){ si_h[n] = 0.5*(1.0-cos(2.0*pi*n/(double)dur)); } // ------------------------------------------ // Set up grid and blocks int Gx = Nx/BLOCK_X; int Gy = Ny/BLOCK_Y; int Gz = (Nz-2)/BLOCK_Z; int GxL = Nx/BLOCK_X; int GyL = Ny/BLOCK_Y; int GzS = Nz-2; dim3 dimBlockInt(BLOCK_X, BLOCK_Y, BLOCK_Z); dim3 dimGridInt(Gx, Gy, Gz); dim3 dimBlockIntL(BLOCK_X, BLOCK_Y, 1); dim3 dimGridIntL(GxL, GyL, 1); dim3 dimBlockIO(1, 1, 1); dim3 dimGridIO(1, 1, 1); size_t mem_size = area*Nz*pr_size; double *out_d, *u_d, *u1_d, *dummy_ptr; double ins; //------------------------------------------- // Initialise memory on device cuErr( hipMalloc(&u_d, mem_size) ); cuErr( hipMemset(u_d, 0, mem_size) ); cuErr( hipMalloc(&u1_d, mem_size) ); cuErr( hipMemset(u1_d, 0, mem_size) ); cuErr( hipMalloc(&out_d, NF*pr_size) ); cuErr( hipMemset(out_d, 0, NF*pr_size) ); //------------------------------------------- // initialise memory on host double *out_h = (double *)calloc(NF,pr_size); if((out_h == NULL)){ printf("\nout_h memory alloc failed...\n"); exit(EXIT_FAILURE); } // using only very basic timings here double start = getTime(); 
for(n=0;n<NF;n++) { hipLaunchKernelGGL(( UpdateRoom), dim3(dimGridIntL),dim3(dimBlockIntL), 0, 0, u_d,u1_d); // perform read in out ins = si_h[n]; hipLaunchKernelGGL(( inout), dim3(dimGridIO),dim3(dimBlockIO), 0, 0, u_d,out_d,ins,n); // update pointers dummy_ptr = u1_d; u1_d = u_d; u_d = dummy_ptr; } double end = getTime(); // print process time checkLastCUDAError("Kernel"); cuErr( hipDeviceSynchronize() ); double totalTime = end - start; // copy result back from device cuErr( hipMemcpy(out_h, out_d, NF*pr_size, hipMemcpyDeviceToHost) ); // print last samples, and write output file printLastSamples(out_h, NF, 5); double bandwidth = (area*Nz*sizeof(double)*1e-9*2)/(totalTime/NF); printf("\nProcess time : %4.4lf seconds\nBandwidth: %4.4lf", (end-start), bandwidth ); // Free memory free(si_h);free(out_h); hipFree(out_d);hipFree(u_d);hipFree(u1_d); printf("\nPut down that cocktail... Simulation complete.\n\n"); exit(EXIT_SUCCESS); } // Standard 3D update scheme __global__ void UpdateRoom(double *u, const double* __restrict__ u1) { __shared__ double uS1[BLOCK_X+2][BLOCK_Y+2]; // get thread indices int tdx = threadIdx.x; int tdy = threadIdx.y; // get X,Y,Z from thread and block Id's int X = blockIdx.x * BLOCK_X + tdx; int Y = blockIdx.y * BLOCK_Y + tdy; int Z,cp; // Set Z=0, Get Z=1 cp value double u1cpm = 0.0; double u1cp = u1[area+(Y*Nx+X)]; double u1cpp; tdx++; tdy++; for(Z=1;Z<(Nz-1);Z++){ // Test that not at halo, Z block excludes Z halo // get linear position cp = Z*area+(Y*Nx+X); u1cpp = u1[cp+area]; uS1[tdx][tdy] = u1cp; if ( (tdy==1) && !(Y==0) ){ uS1[tdx][tdy-1] = u1[cp-Nx]; } if ( (tdy==BLOCK_Y) && !(Y==(Ny-1)) ){ uS1[tdx][tdy+1] = u1[cp+Nx]; } if ( (tdx==1) && !(X==0) ){ uS1[tdx-1][tdy] = u1[cp-1]; } if ( (tdx==BLOCK_X) && !(X==(Nx-1)) ){ uS1[tdx+1][tdy] = u1[cp+1]; } __syncthreads(); // local variables double cf = 1.0; double cf2 = 1.0; if( (X>0) && (X<(Nx-1)) && (Y>0) && (Y<(Ny-1)) ){ int K = (0||(X-1)) + (0||(X-(Nx-2))) + (0||(Y-1)) + (0||(Y-(Ny-2))) + (0||(Z-1)) + (0||(Z-(Nz-2))); // set loss coeffs at walls if(K<6){ cf = cf_d[0].loss1; cf2 = cf_d[0].loss2; } // Get sum of neighbours double S = uS1[tdx-1][tdy]+uS1[tdx+1][tdy]+uS1[tdx][tdy-1]+uS1[tdx][tdy+1]+u1cpm+u1cpp; // Calc update u[cp] = cf*( (2.0-K*cf_d[0].l2)*u1cp + cf_d[0].l2*S - cf2*u[cp] ); // Shift cps u1cpm = u1cp; u1cp = u1cpp; __syncthreads(); } } } // read output and sum in input __global__ void inout(double *u,double *out,double ins,int n) { // sum in source u[(Sz*area)+(Sy*Nx+Sx)] += ins; // non-interp read out out[n] = u[(Rz*area)+(Ry*Nx+Rx)]; }
61423cf24da34a29a14664c3b6a56273caee7656.cu
/* ------------------------------------------------- File : BasicRoom.cu Author : Craig J. Webb Date : 18/03/12 Desc : 3D wave eq test ------------------------------------------------- */ // Set precision #include "blocks.h" #include "timing.h" #include "cuda_profiler_api.h" #define area (Nx*Ny) // Define Source and Read #define Sx 120 #define Sy 120 #define Sz 60 #define Nx 512 #define Ny 512 #define Nz 404 #define Rx 50 #define Ry 50 #define Rz 50 #define numberSamples 4410 #define dim 3 #define nano 1.e-09 #define pi 3.1415926535897932384626433832795 // kernel methods __global__ void UpdateRoom(double *u, const double* __restrict__ u1); __global__ void inout(double *u,double *out,double ins,int n); typedef struct { double l2; double loss1; double loss2; } coeffs_type; __constant__ coeffs_type cf_d[1]; // ---------------------------------------------- // some of Craig's definitions #define cuErr(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( cudaError err, const char *file, const int line ){ if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } void checkLastCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "\nCuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } void printLastSamples(double *audio, int NF, int N) { int n; double maxy; // Test that N > 0 if(N<1){ printf("Must display 1 or more samples...\n"); } else{ //print last N samples printf("\n"); for(n=NF-N;n<NF;n++) { printf("Sample %d : %.20f\n",n,audio[n]); } // find max maxy = 0.0; for(n=0;n<NF;n++) { if(fabs(audio[n])>maxy) maxy = fabs(audio[n]); } printf("\nMax sample : %.20f\n",maxy); } } int main(){ // Simulation parameters double SR = 1000.0; int NF = 1000; double c = 344.0; double k = 1/SR; double h = sqrt(3.0)*c*k; double l2 = 1.0/3.0; double alpha = 0.005; // Boundary loss double lambda = c*k/h; coeffs_type cf_h[1]; cf_h[0].l2 = (c*c*k*k)/(h*h); cf_h[0].loss1 = 1.0/(1.0+lambda*alpha); cf_h[0].loss2 = 1.0-lambda*alpha; cudaMemcpyToSymbol(cf_d,cf_h,sizeof(coeffs_type)) ; //------------------------------------------- // Initialise input int n; size_t pr_size = sizeof(double); int dur = 20; double *si_h = (double *)calloc(NF,pr_size); for(n=0;n<dur;n++){ si_h[n] = 0.5*(1.0-cos(2.0*pi*n/(double)dur)); } // ------------------------------------------ // Set up grid and blocks int Gx = Nx/BLOCK_X; int Gy = Ny/BLOCK_Y; int Gz = (Nz-2)/BLOCK_Z; int GxL = Nx/BLOCK_X; int GyL = Ny/BLOCK_Y; int GzS = Nz-2; dim3 dimBlockInt(BLOCK_X, BLOCK_Y, BLOCK_Z); dim3 dimGridInt(Gx, Gy, Gz); dim3 dimBlockIntL(BLOCK_X, BLOCK_Y, 1); dim3 dimGridIntL(GxL, GyL, 1); dim3 dimBlockIO(1, 1, 1); dim3 dimGridIO(1, 1, 1); size_t mem_size = area*Nz*pr_size; double *out_d, *u_d, *u1_d, *dummy_ptr; double ins; //------------------------------------------- // Initialise memory on device cuErr( cudaMalloc(&u_d, mem_size) ); cuErr( cudaMemset(u_d, 0, mem_size) ); cuErr( cudaMalloc(&u1_d, mem_size) ); cuErr( cudaMemset(u1_d, 0, mem_size) ); cuErr( cudaMalloc(&out_d, NF*pr_size) ); cuErr( cudaMemset(out_d, 0, NF*pr_size) ); //------------------------------------------- // initialise memory on host double *out_h = (double *)calloc(NF,pr_size); if((out_h == NULL)){ printf("\nout_h memory alloc failed...\n"); exit(EXIT_FAILURE); } // using only very basic timings here double start = getTime(); for(n=0;n<NF;n++) { 
UpdateRoom<<<dimGridIntL,dimBlockIntL>>>(u_d,u1_d); // perform read in out ins = si_h[n]; inout<<<dimGridIO,dimBlockIO>>>(u_d,out_d,ins,n); // update pointers dummy_ptr = u1_d; u1_d = u_d; u_d = dummy_ptr; } double end = getTime(); // print process time checkLastCUDAError("Kernel"); cuErr( cudaDeviceSynchronize() ); double totalTime = end - start; // copy result back from device cuErr( cudaMemcpy(out_h, out_d, NF*pr_size, cudaMemcpyDeviceToHost) ); // print last samples, and write output file printLastSamples(out_h, NF, 5); double bandwidth = (area*Nz*sizeof(double)*1e-9*2)/(totalTime/NF); printf("\nProcess time : %4.4lf seconds\nBandwidth: %4.4lf", (end-start), bandwidth ); // Free memory free(si_h);free(out_h); cudaFree(out_d);cudaFree(u_d);cudaFree(u1_d); printf("\nPut down that cocktail... Simulation complete.\n\n"); exit(EXIT_SUCCESS); } // Standard 3D update scheme __global__ void UpdateRoom(double *u, const double* __restrict__ u1) { __shared__ double uS1[BLOCK_X+2][BLOCK_Y+2]; // get thread indices int tdx = threadIdx.x; int tdy = threadIdx.y; // get X,Y,Z from thread and block Id's int X = blockIdx.x * BLOCK_X + tdx; int Y = blockIdx.y * BLOCK_Y + tdy; int Z,cp; // Set Z=0, Get Z=1 cp value double u1cpm = 0.0; double u1cp = u1[area+(Y*Nx+X)]; double u1cpp; tdx++; tdy++; for(Z=1;Z<(Nz-1);Z++){ // Test that not at halo, Z block excludes Z halo // get linear position cp = Z*area+(Y*Nx+X); u1cpp = u1[cp+area]; uS1[tdx][tdy] = u1cp; if ( (tdy==1) && !(Y==0) ){ uS1[tdx][tdy-1] = u1[cp-Nx]; } if ( (tdy==BLOCK_Y) && !(Y==(Ny-1)) ){ uS1[tdx][tdy+1] = u1[cp+Nx]; } if ( (tdx==1) && !(X==0) ){ uS1[tdx-1][tdy] = u1[cp-1]; } if ( (tdx==BLOCK_X) && !(X==(Nx-1)) ){ uS1[tdx+1][tdy] = u1[cp+1]; } __syncthreads(); // local variables double cf = 1.0; double cf2 = 1.0; if( (X>0) && (X<(Nx-1)) && (Y>0) && (Y<(Ny-1)) ){ int K = (0||(X-1)) + (0||(X-(Nx-2))) + (0||(Y-1)) + (0||(Y-(Ny-2))) + (0||(Z-1)) + (0||(Z-(Nz-2))); // set loss coeffs at walls if(K<6){ cf = cf_d[0].loss1; cf2 = cf_d[0].loss2; } // Get sum of neighbours double S = uS1[tdx-1][tdy]+uS1[tdx+1][tdy]+uS1[tdx][tdy-1]+uS1[tdx][tdy+1]+u1cpm+u1cpp; // Calc update u[cp] = cf*( (2.0-K*cf_d[0].l2)*u1cp + cf_d[0].l2*S - cf2*u[cp] ); // Shift cps u1cpm = u1cp; u1cp = u1cpp; __syncthreads(); } } } // read output and sum in input __global__ void inout(double *u,double *out,double ins,int n) { // sum in source u[(Sz*area)+(Sy*Nx+Sx)] += ins; // non-interp read out out[n] = u[(Rz*area)+(Ry*Nx+Rx)]; }
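// Editor's note (derivation, not in the files above): with h = sqrt(3)*c*k the
// Courant number is lambda = c*k/h = 1/sqrt(3), so
//   l2 = (c*c*k*k)/(h*h) = lambda^2 = 1/3,
// i.e. the scheme sits exactly at the 3D FDTD stability limit
// lambda <= 1/sqrt(3). The boundary coefficients then follow as in the file:
//   loss1 = 1/(1 + lambda*alpha) and loss2 = 1 - lambda*alpha.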
60266154317e4b1537966d6d7d37c268ce8be5af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include "common/Error.h" #include "common/GpuTimer.h" #include "common/Vector.h" #define N 500 #define FULL_DATA_SIZE 100000000 #define DIMGRID 10 #define DIMBLOCK 10 #define XMIN -10.0 #define XMAX 10.0 const int ARRAY_BYTES = N * sizeof(float); const int FULL_ARRAY_BYTES = FULL_DATA_SIZE * sizeof(float); __host__ __device__ float function1(float x) { return x*x; } __host__ __device__ float function2(float x) { return sinf(x); } __global__ void functionKernel1( Vector<float> d_a, int n) { int i = threadIdx.x + blockIdx.x*blockDim.x; float x, dx; dx = (XMAX - (XMIN)) / ((float) N-1); while (i < n) { x = XMIN + i*dx; d_a.setElement( i, function1(x) ); i += blockDim.x*gridDim.x; } } __global__ void functionKernel2( Vector<float> d_a, int n) { int i = threadIdx.x + blockIdx.x*blockDim.x; float x, dx; dx = (XMAX-(XMIN))/((float)N -1); while (i < n) { x = XMIN + i*dx; d_a.setElement( i, function2(x) ); i += blockDim.x*gridDim.x; } } void onDevice(Vector<float> h_a, Vector<float> h_b) { Vector<float> d_a, d_b; // create the stream hipStream_t stream1; hipStream_t stream2; HANDLER_ERROR_ERR(hipStreamCreate (&stream1)); HANDLER_ERROR_ERR(hipStreamCreate (&stream2)); GpuTimer timer1,timer2; // Stream 1 timer1.Start(stream1); HANDLER_ERROR_ERR(hipMalloc((void**)&d_a.elements, ARRAY_BYTES)); HANDLER_ERROR_ERR(hipMalloc((void**)&d_b.elements, ARRAY_BYTES)); for (int i=0; i<FULL_DATA_SIZE; i+= N) { // copy the locked memory to the device, async HANDLER_ERROR_ERR( hipMemcpyAsync( d_a.elements, h_a.elements+i, ARRAY_BYTES, hipMemcpyHostToDevice, stream1 ) ); hipLaunchKernelGGL(( functionKernel1), dim3(DIMGRID), dim3(DIMBLOCK) , 0, stream1 , d_a, N); HANDLER_ERROR_MSG("kernel panic!!!"); // copy the locked memory to the device, async HANDLER_ERROR_ERR( hipMemcpyAsync( h_a.elements+i, d_a.elements, ARRAY_BYTES, hipMemcpyDeviceToHost, stream1 ) ); } // synchronization HANDLER_ERROR_ERR( hipStreamSynchronize( stream1 ) ); timer1.Stop(stream1); // Stream 2 timer2.Start(stream2); for (int i=0; i<FULL_DATA_SIZE; i+= N) { // copy the locked memory to the device, async HANDLER_ERROR_ERR( hipMemcpyAsync( d_b.elements, h_b.elements+i, ARRAY_BYTES, hipMemcpyHostToDevice, stream2 ) ); hipLaunchKernelGGL(( functionKernel2), dim3(DIMGRID), dim3(DIMBLOCK), 0, stream2 , d_b, N); HANDLER_ERROR_MSG("kernel panic!!!"); // copy the locked memory to the device, async HANDLER_ERROR_ERR( hipMemcpyAsync( h_b.elements+i, d_b.elements, ARRAY_BYTES, hipMemcpyDeviceToHost, stream2 ) ); } // synchronization HANDLER_ERROR_ERR( hipStreamSynchronize( stream2 ) ); timer2.Stop(stream2); // print time printf( "Time 1: %f ms\n", timer1.Elapsed() ); // print time printf( "Time 2: %f ms\n", timer2.Elapsed() ); printf("Total time %f ms\n", timer2.Elapsed() + timer1.Elapsed()); //destroy stream HANDLER_ERROR_ERR(hipStreamDestroy(stream1)); HANDLER_ERROR_ERR(hipStreamDestroy(stream2)); //free device memory HANDLER_ERROR_ERR(hipFree(d_a.elements)); HANDLER_ERROR_ERR(hipFree(d_b.elements)); } void test(){ Vector<float> h_a, h_b; h_a.length = FULL_DATA_SIZE; h_b.length = FULL_DATA_SIZE; // allocate host locked memory HANDLER_ERROR_ERR( hipHostMalloc((void**)&h_a.elements, FULL_ARRAY_BYTES, hipHostMallocDefault )); HANDLER_ERROR_ERR( hipHostMalloc((void**)&h_b.elements, FULL_ARRAY_BYTES, hipHostMallocDefault )); int i; for(i = 0; i < FULL_DATA_SIZE; i++){ h_a.setElement(i, 0.0); h_b.setElement(i, 0.0); } // call device 
configuration onDevice(h_a, h_b); printf("-: successful execution :-\n"); // free host memory HANDLER_ERROR_ERR(hipHostFree( h_a.elements )); HANDLER_ERROR_ERR(hipHostFree( h_b.elements )); } void checkDeviceProps(){ //properties validation hipDeviceProp_t prop; int whichDevice; HANDLER_ERROR_ERR( hipGetDevice( &whichDevice ) ); HANDLER_ERROR_ERR( hipGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); } } int main(){ checkDeviceProps(); test(); }
60266154317e4b1537966d6d7d37c268ce8be5af.cu
#include <iostream> #include <math.h> #include "common/Error.h" #include "common/GpuTimer.h" #include "common/Vector.h" #define N 500 #define FULL_DATA_SIZE 100000000 #define DIMGRID 10 #define DIMBLOCK 10 #define XMIN -10.0 #define XMAX 10.0 const int ARRAY_BYTES = N * sizeof(float); const int FULL_ARRAY_BYTES = FULL_DATA_SIZE * sizeof(float); __host__ __device__ float function1(float x) { return x*x; } __host__ __device__ float function2(float x) { return sinf(x); } __global__ void functionKernel1( Vector<float> d_a, int n) { int i = threadIdx.x + blockIdx.x*blockDim.x; float x, dx; dx = (XMAX - (XMIN)) / ((float) N-1); while (i < n) { x = XMIN + i*dx; d_a.setElement( i, function1(x) ); i += blockDim.x*gridDim.x; } } __global__ void functionKernel2( Vector<float> d_a, int n) { int i = threadIdx.x + blockIdx.x*blockDim.x; float x, dx; dx = (XMAX-(XMIN))/((float)N -1); while (i < n) { x = XMIN + i*dx; d_a.setElement( i, function2(x) ); i += blockDim.x*gridDim.x; } } void onDevice(Vector<float> h_a, Vector<float> h_b) { Vector<float> d_a, d_b; // create the stream cudaStream_t stream1; cudaStream_t stream2; HANDLER_ERROR_ERR(cudaStreamCreate (&stream1)); HANDLER_ERROR_ERR(cudaStreamCreate (&stream2)); GpuTimer timer1,timer2; // Stream 1 timer1.Start(stream1); HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a.elements, ARRAY_BYTES)); HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b.elements, ARRAY_BYTES)); for (int i=0; i<FULL_DATA_SIZE; i+= N) { // copy the locked memory to the device, async HANDLER_ERROR_ERR( cudaMemcpyAsync( d_a.elements, h_a.elements+i, ARRAY_BYTES, cudaMemcpyHostToDevice, stream1 ) ); functionKernel1<<< DIMGRID, DIMBLOCK , 0, stream1 >>>(d_a, N); HANDLER_ERROR_MSG("kernel panic!!!"); // copy the locked memory to the device, async HANDLER_ERROR_ERR( cudaMemcpyAsync( h_a.elements+i, d_a.elements, ARRAY_BYTES, cudaMemcpyDeviceToHost, stream1 ) ); } // synchronization HANDLER_ERROR_ERR( cudaStreamSynchronize( stream1 ) ); timer1.Stop(stream1); // Stream 2 timer2.Start(stream2); for (int i=0; i<FULL_DATA_SIZE; i+= N) { // copy the locked memory to the device, async HANDLER_ERROR_ERR( cudaMemcpyAsync( d_b.elements, h_b.elements+i, ARRAY_BYTES, cudaMemcpyHostToDevice, stream2 ) ); functionKernel2<<< DIMGRID, DIMBLOCK, 0, stream2 >>>(d_b, N); HANDLER_ERROR_MSG("kernel panic!!!"); // copy the locked memory to the device, async HANDLER_ERROR_ERR( cudaMemcpyAsync( h_b.elements+i, d_b.elements, ARRAY_BYTES, cudaMemcpyDeviceToHost, stream2 ) ); } // synchronization HANDLER_ERROR_ERR( cudaStreamSynchronize( stream2 ) ); timer2.Stop(stream2); // print time printf( "Time 1: %f ms\n", timer1.Elapsed() ); // print time printf( "Time 2: %f ms\n", timer2.Elapsed() ); printf("Total time %f ms\n", timer2.Elapsed() + timer1.Elapsed()); //destroy stream HANDLER_ERROR_ERR(cudaStreamDestroy(stream1)); HANDLER_ERROR_ERR(cudaStreamDestroy(stream2)); //free device memory HANDLER_ERROR_ERR(cudaFree(d_a.elements)); HANDLER_ERROR_ERR(cudaFree(d_b.elements)); } void test(){ Vector<float> h_a, h_b; h_a.length = FULL_DATA_SIZE; h_b.length = FULL_DATA_SIZE; // allocate host locked memory HANDLER_ERROR_ERR( cudaHostAlloc((void**)&h_a.elements, FULL_ARRAY_BYTES, cudaHostAllocDefault )); HANDLER_ERROR_ERR( cudaHostAlloc((void**)&h_b.elements, FULL_ARRAY_BYTES, cudaHostAllocDefault )); int i; for(i = 0; i < FULL_DATA_SIZE; i++){ h_a.setElement(i, 0.0); h_b.setElement(i, 0.0); } // call device configuration onDevice(h_a, h_b); printf("-: successful execution :-\n"); // free host memory HANDLER_ERROR_ERR(cudaFreeHost( 
h_a.elements )); HANDLER_ERROR_ERR(cudaFreeHost( h_b.elements )); } void checkDeviceProps(){ //properties validation cudaDeviceProp prop; int whichDevice; HANDLER_ERROR_ERR( cudaGetDevice( &whichDevice ) ); HANDLER_ERROR_ERR( cudaGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); } } int main(){ checkDeviceProps(); test(); }
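// Editor's note, not in the files above: the async copies in onDevice can only
// overlap with kernel work because h_a/h_b were allocated page-locked via
// cudaHostAlloc; with ordinary pageable memory, cudaMemcpyAsync falls back to
// staged copies that are effectively synchronous. The per-chunk pipeline used
// on each stream, reduced to its minimal shape:
//   cudaMemcpyAsync(d, h + i, ARRAY_BYTES, cudaMemcpyHostToDevice, s);
//   kernel<<<DIMGRID, DIMBLOCK, 0, s>>>(d, N);
//   cudaMemcpyAsync(h + i, d, ARRAY_BYTES, cudaMemcpyDeviceToHost, s);
//   ...
//   cudaStreamSynchronize(s);  // once, after the chunk loop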
272316109f93cc7fb6b172229e58ebb99a3708dd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "VectorMultiplicationKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *array = NULL; hipMalloc(&array, XSIZE*YSIZE); int arrayCount = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( VectorMultiplicationKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, array,arrayCount); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( VectorMultiplicationKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, array,arrayCount); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( VectorMultiplicationKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, array,arrayCount); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
272316109f93cc7fb6b172229e58ebb99a3708dd.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "VectorMultiplicationKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *array = NULL; cudaMalloc(&array, XSIZE*YSIZE); int arrayCount = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); VectorMultiplicationKernel<<<gridBlock,threadBlock>>>(array,arrayCount); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { VectorMultiplicationKernel<<<gridBlock,threadBlock>>>(array,arrayCount); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { VectorMultiplicationKernel<<<gridBlock,threadBlock>>>(array,arrayCount); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
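// Editor's note, not in the original files: the 1000-iteration timing loop
// above never synchronizes before taking `end`, and kernel launches are
// asynchronous, so steady_clock largely measures enqueue time rather than
// execution time. A sketch of the usual correction, reusing the file's names:
auto t0 = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    VectorMultiplicationKernel<<<gridBlock, threadBlock>>>(array, arrayCount);
}
cudaDeviceSynchronize();  // drain the launch queue before reading the clock
auto t1 = steady_clock::now();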
1e070189c832a23d7494c0b131d7c095b2cdc954.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <memory> #include <string> #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/data_norm_op.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using DataLayout = phi::DataLayout; using phi::PADDLE_CUDA_NUM_THREADS; inline int GET_BLOCKS(const int N) { return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS; } template <typename T> __global__ void KernelDataNormFF( int N, int C, const T *x, T *y, const T *mean, const T *scale) { CUDA_KERNEL_LOOP(i, N * C) { int col = i % C; y[i] = (x[i] - mean[col]) * scale[col]; } } template <typename T> __global__ void KernelMeanScale(int C, const T *batch_size, const T *batch_sum, const T *batch_square_sum, T *mean, T *scale) { CUDA_KERNEL_LOOP(i, C) { mean[i] = batch_sum[i] / batch_size[i]; scale[i] = sqrt(batch_size[i] / batch_square_sum[i]); } } template <typename T> __global__ void KernelDataNormBP( int N, int C, const T *y_grad, const T *scale, T *x_grad) { CUDA_KERNEL_LOOP(i, N * C) { x_grad[i] = y_grad[i] * scale[i % C]; } } template <typename T> __global__ void KernelDataNormBPStat(int N, int C, const T *x_val, const T *means, const float squared_sum_epsilon, T *batch_size, T *batch_sum, T *batch_square_sum) { CUDA_KERNEL_LOOP(i, C) { T val_sum = 0; T square_sum = 0; for (int j = 0; j < N; j++) { val_sum += x_val[j * C + i]; square_sum += (x_val[j * C + i] - means[i]) * (x_val[j * C + i] - means[i]); } batch_size[i] = 1; batch_sum[i] = val_sum / N; batch_square_sum[i] = square_sum / N + squared_sum_epsilon; } } template <typename T> __global__ void KernelUpdateParam(int C, const T *d_batch_size, const T *d_batch_sum, const T *d_batch_square_sum, T *batch_size, T *batch_sum, T *batch_square_sum, const float decay_rate) { CUDA_KERNEL_LOOP(i, C) { batch_size[i] = batch_size[i] * decay_rate + d_batch_size[i]; batch_sum[i] = batch_sum[i] * decay_rate + d_batch_sum[i]; batch_square_sum[i] = batch_square_sum[i] * decay_rate + d_batch_square_sum[i]; } } template <typename T> class DataNormKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto &x_dims = x->dims(); // Align with CPU version, but should we add this restriction? 
PADDLE_ENFORCE_EQ( x_dims.size(), 2, platform::errors::PreconditionNotMet("The Input dim size should be 2")); const int N = x_dims[0]; const int C = x_dims[1]; PADDLE_ENFORCE_LT(0, N, platform::errors::InvalidArgument( "The dims of Input(X) should be greater than 0.")); PADDLE_ENFORCE_LT(0, C, platform::errors::InvalidArgument( "The dims of Input(X) should be greater than 0.")); const T *batch_size_in = ctx.Input<phi::DenseTensor>("BatchSize")->data<T>(); const T *batch_sum_in = ctx.Input<phi::DenseTensor>("BatchSum")->data<T>(); const T *batch_square_sum_in = ctx.Input<phi::DenseTensor>("BatchSquareSum")->data<T>(); auto *x_data = x->data<T>(); // alloc memory T *y_data = ctx.Output<phi::DenseTensor>("Y")->mutable_data<T>(ctx.GetPlace()); T *mean_out_data = ctx.Output<phi::DenseTensor>("Means")->mutable_data<T>(ctx.GetPlace()); T *scale_out_data = ctx.Output<phi::DenseTensor>("Scales")->mutable_data<T>(ctx.GetPlace()); auto stream = ctx.template device_context<phi::GPUContext>().stream(); hipLaunchKernelGGL(( KernelMeanScale), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, C, batch_size_in, batch_sum_in, batch_square_sum_in, mean_out_data, scale_out_data); hipLaunchKernelGGL(( KernelDataNormFF), dim3(GET_BLOCKS(C * N)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, N, C, x_data, y_data, mean_out_data, scale_out_data); } }; template <typename T> class DataNormGradKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto *d_y = ctx.Input<phi::DenseTensor>(framework::GradVarName("Y")); const auto *scales = ctx.Input<phi::DenseTensor>("Scales"); const auto *means = ctx.Input<phi::DenseTensor>("Means"); const float epsilon = ctx.Attr<float>("epsilon"); const float dr = ctx.Attr<float>("summary_decay_rate"); const bool need_sync_stats = ctx.Attr<bool>("sync_stats"); const auto &x_dims = x->dims(); // Align with CPU version, but should we add this restriction? 
PADDLE_ENFORCE_EQ( x_dims.size(), 2, platform::errors::PreconditionNotMet("The Input dim size should be 2")); const int N = x_dims[0]; const int C = x_dims[1]; // init output phi::DenseTensor *d_x = nullptr; if (ctx.HasOutput(framework::GradVarName("X"))) { d_x = ctx.Output<phi::DenseTensor>(framework::GradVarName("X")); } T *d_batch_size = ctx.Output<phi::DenseTensor>(framework::GradVarName("BatchSize")) ->mutable_data<T>(ctx.GetPlace()); T *d_batch_sum = ctx.Output<phi::DenseTensor>(framework::GradVarName("BatchSum")) ->mutable_data<T>(ctx.GetPlace()); T *d_batch_square_sum = ctx.Output<phi::DenseTensor>(framework::GradVarName("BatchSquareSum")) ->mutable_data<T>(ctx.GetPlace()); auto stream = ctx.template device_context<phi::GPUContext>().stream(); if (d_x != nullptr) { hipLaunchKernelGGL(( KernelDataNormBP), dim3(GET_BLOCKS(C * N)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, N, C, d_y->data<T>(), scales->data<T>(), d_x->mutable_data<T>(ctx.GetPlace())); } hipLaunchKernelGGL(( KernelDataNormBPStat), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, N, C, x->data<T>(), means->data<T>(), epsilon, d_batch_size, d_batch_sum, d_batch_square_sum); if (need_sync_stats) { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace()); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast<const void *>(d_batch_size), reinterpret_cast<void *>(d_batch_size), C, platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())), ncclSum, comm->comm(), stream)); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast<const void *>(d_batch_sum), reinterpret_cast<void *>(d_batch_sum), C, platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())), ncclSum, comm->comm(), stream)); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast<const void *>(d_batch_square_sum), reinterpret_cast<void *>(d_batch_square_sum), C, platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())), ncclSum, comm->comm(), stream)); platform::GpuStreamSync(stream); #else PADDLE_THROW(platform::errors::PreconditionNotMet( "PaddlePaddle should compile with GPU, and need_sync_stats connot be " "supported on windows now.")); #endif } T *batch_size_data = ctx.Output<phi::DenseTensor>("BatchSize") ->mutable_data<T>(ctx.GetPlace()); T *batch_sum_data = ctx.Output<phi::DenseTensor>("BatchSum") ->mutable_data<T>(ctx.GetPlace()); T *batch_square_sum_data = ctx.Output<phi::DenseTensor>("BatchSquareSum") ->mutable_data<T>(ctx.GetPlace()); hipLaunchKernelGGL(( KernelUpdateParam), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, C, d_batch_size, d_batch_sum, d_batch_square_sum, batch_size_data, batch_sum_data, batch_square_sum_data, dr); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( data_norm, GPU, ALL_LAYOUT, ops::DataNormKernel, float, double) {} PD_REGISTER_STRUCT_KERNEL( data_norm_grad, GPU, ALL_LAYOUT, ops::DataNormGradKernel, float, double) {}
1e070189c832a23d7494c0b131d7c095b2cdc954.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <memory> #include <string> #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/data_norm_op.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using DataLayout = phi::DataLayout; using phi::PADDLE_CUDA_NUM_THREADS; inline int GET_BLOCKS(const int N) { return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS; } template <typename T> __global__ void KernelDataNormFF( int N, int C, const T *x, T *y, const T *mean, const T *scale) { CUDA_KERNEL_LOOP(i, N * C) { int col = i % C; y[i] = (x[i] - mean[col]) * scale[col]; } } template <typename T> __global__ void KernelMeanScale(int C, const T *batch_size, const T *batch_sum, const T *batch_square_sum, T *mean, T *scale) { CUDA_KERNEL_LOOP(i, C) { mean[i] = batch_sum[i] / batch_size[i]; scale[i] = sqrt(batch_size[i] / batch_square_sum[i]); } } template <typename T> __global__ void KernelDataNormBP( int N, int C, const T *y_grad, const T *scale, T *x_grad) { CUDA_KERNEL_LOOP(i, N * C) { x_grad[i] = y_grad[i] * scale[i % C]; } } template <typename T> __global__ void KernelDataNormBPStat(int N, int C, const T *x_val, const T *means, const float squared_sum_epsilon, T *batch_size, T *batch_sum, T *batch_square_sum) { CUDA_KERNEL_LOOP(i, C) { T val_sum = 0; T square_sum = 0; for (int j = 0; j < N; j++) { val_sum += x_val[j * C + i]; square_sum += (x_val[j * C + i] - means[i]) * (x_val[j * C + i] - means[i]); } batch_size[i] = 1; batch_sum[i] = val_sum / N; batch_square_sum[i] = square_sum / N + squared_sum_epsilon; } } template <typename T> __global__ void KernelUpdateParam(int C, const T *d_batch_size, const T *d_batch_sum, const T *d_batch_square_sum, T *batch_size, T *batch_sum, T *batch_square_sum, const float decay_rate) { CUDA_KERNEL_LOOP(i, C) { batch_size[i] = batch_size[i] * decay_rate + d_batch_size[i]; batch_sum[i] = batch_sum[i] * decay_rate + d_batch_sum[i]; batch_square_sum[i] = batch_square_sum[i] * decay_rate + d_batch_square_sum[i]; } } template <typename T> class DataNormKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto &x_dims = x->dims(); // Align with CPU version, but should we add this restriction? 
PADDLE_ENFORCE_EQ( x_dims.size(), 2, platform::errors::PreconditionNotMet("The Input dim size should be 2")); const int N = x_dims[0]; const int C = x_dims[1]; PADDLE_ENFORCE_LT(0, N, platform::errors::InvalidArgument( "The dims of Input(X) should be greater than 0.")); PADDLE_ENFORCE_LT(0, C, platform::errors::InvalidArgument( "The dims of Input(X) should be greater than 0.")); const T *batch_size_in = ctx.Input<phi::DenseTensor>("BatchSize")->data<T>(); const T *batch_sum_in = ctx.Input<phi::DenseTensor>("BatchSum")->data<T>(); const T *batch_square_sum_in = ctx.Input<phi::DenseTensor>("BatchSquareSum")->data<T>(); auto *x_data = x->data<T>(); // alloc memory T *y_data = ctx.Output<phi::DenseTensor>("Y")->mutable_data<T>(ctx.GetPlace()); T *mean_out_data = ctx.Output<phi::DenseTensor>("Means")->mutable_data<T>(ctx.GetPlace()); T *scale_out_data = ctx.Output<phi::DenseTensor>("Scales")->mutable_data<T>(ctx.GetPlace()); auto stream = ctx.template device_context<phi::GPUContext>().stream(); KernelMeanScale<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( C, batch_size_in, batch_sum_in, batch_square_sum_in, mean_out_data, scale_out_data); KernelDataNormFF<<<GET_BLOCKS(C * N), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( N, C, x_data, y_data, mean_out_data, scale_out_data); } }; template <typename T> class DataNormGradKernel<T, phi::GPUContext> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *x = ctx.Input<phi::DenseTensor>("X"); const auto *d_y = ctx.Input<phi::DenseTensor>(framework::GradVarName("Y")); const auto *scales = ctx.Input<phi::DenseTensor>("Scales"); const auto *means = ctx.Input<phi::DenseTensor>("Means"); const float epsilon = ctx.Attr<float>("epsilon"); const float dr = ctx.Attr<float>("summary_decay_rate"); const bool need_sync_stats = ctx.Attr<bool>("sync_stats"); const auto &x_dims = x->dims(); // Align with CPU version, but should we add this restriction? 
PADDLE_ENFORCE_EQ( x_dims.size(), 2, platform::errors::PreconditionNotMet("The Input dim size should be 2")); const int N = x_dims[0]; const int C = x_dims[1]; // init output phi::DenseTensor *d_x = nullptr; if (ctx.HasOutput(framework::GradVarName("X"))) { d_x = ctx.Output<phi::DenseTensor>(framework::GradVarName("X")); } T *d_batch_size = ctx.Output<phi::DenseTensor>(framework::GradVarName("BatchSize")) ->mutable_data<T>(ctx.GetPlace()); T *d_batch_sum = ctx.Output<phi::DenseTensor>(framework::GradVarName("BatchSum")) ->mutable_data<T>(ctx.GetPlace()); T *d_batch_square_sum = ctx.Output<phi::DenseTensor>(framework::GradVarName("BatchSquareSum")) ->mutable_data<T>(ctx.GetPlace()); auto stream = ctx.template device_context<phi::GPUContext>().stream(); if (d_x != nullptr) { KernelDataNormBP<<<GET_BLOCKS(C * N), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(N, C, d_y->data<T>(), scales->data<T>(), d_x->mutable_data<T>(ctx.GetPlace())); } KernelDataNormBPStat<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( N, C, x->data<T>(), means->data<T>(), epsilon, d_batch_size, d_batch_sum, d_batch_square_sum); if (need_sync_stats) { #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace()); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast<const void *>(d_batch_size), reinterpret_cast<void *>(d_batch_size), C, platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())), ncclSum, comm->comm(), stream)); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast<const void *>(d_batch_sum), reinterpret_cast<void *>(d_batch_sum), C, platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())), ncclSum, comm->comm(), stream)); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( reinterpret_cast<const void *>(d_batch_square_sum), reinterpret_cast<void *>(d_batch_square_sum), C, platform::ToNCCLDataType(framework::TransToProtoVarType(x->dtype())), ncclSum, comm->comm(), stream)); platform::GpuStreamSync(stream); #else PADDLE_THROW(platform::errors::PreconditionNotMet( "PaddlePaddle should compile with GPU, and need_sync_stats connot be " "supported on windows now.")); #endif } T *batch_size_data = ctx.Output<phi::DenseTensor>("BatchSize") ->mutable_data<T>(ctx.GetPlace()); T *batch_sum_data = ctx.Output<phi::DenseTensor>("BatchSum") ->mutable_data<T>(ctx.GetPlace()); T *batch_square_sum_data = ctx.Output<phi::DenseTensor>("BatchSquareSum") ->mutable_data<T>(ctx.GetPlace()); KernelUpdateParam<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>( C, d_batch_size, d_batch_sum, d_batch_square_sum, batch_size_data, batch_sum_data, batch_square_sum_data, dr); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( data_norm, GPU, ALL_LAYOUT, ops::DataNormKernel, float, double) {} PD_REGISTER_STRUCT_KERNEL( data_norm_grad, GPU, ALL_LAYOUT, ops::DataNormGradKernel, float, double) {}
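Both versions of the kernels above iterate with Paddle's CUDA_KERNEL_LOOP(i, n) macro from gpu_primitives.h, and GET_BLOCKS sizes the grid by ceil division. A self-contained sketch of the grid-stride loop idiom such macros implement (the kernel below is illustrative; the exact macro body in Paddle may differ slightly):

__global__ void scaleInPlace(int n, float alpha, float *y) {
    // grid-stride loop: each thread starts at its global index and hops by
    // the total thread count, so any grid size covers all n elements
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        y[i] *= alpha;
    }
}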
d6deacfe3830da2e286b12151eb3f8aa18377a83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This is a personal academic project. Dear PVS-Studio, please check it. // PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com #include "coloringMCMC.h" __global__ void ColoringMCMC_k::selectStarColoringDecrease(uint32_t nnodes, uint32_t * starColoring_d, float * qStar_d, col_sz nCol, uint32_t * coloring_d, node_sz * cumulDegs, node * neighs, bool * colorsChecker_d, uint32_t * taboo_d, uint32_t tabooIteration, float * probDistribution_d, hiprandState_t * states, float lambda, float epsilon, uint32_t * statsFreeColors_d) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= nnodes) return; #ifdef TABOO if (taboo_d[idx] > 0) { taboo_d[idx]--; qStar_d[idx] = (1.0f - (nCol - 1) * epsilon); //save the probability of the color chosen return; } #endif // TABOO uint32_t index = cumulDegs[idx]; //index of the node in neighs uint32_t nneighs = cumulDegs[idx + 1] - index; //number of neighbors uint32_t nodeCol = coloring_d[idx]; //node color bool * colorsChecker = &(colorsChecker_d[idx * nCol]); //array used to set if a color is used from the neighbors for (int i = 0; i < nneighs; i++) { colorsChecker[coloring_d[neighs[index + i]]] = 1; } float reminder = 0; uint32_t Zn = 0, Zp = nCol; //number of free colors (p) and occupied colors (n) for (int i = 0; i < nCol; i++) { Zn += colorsChecker[i] != 0; reminder += (colorsChecker[i] != 0) * (probDistribution_d[i] - epsilon); } Zp -= Zn; if (!Zp) { //manage exception of no free colors starColoring_d[idx] = nodeCol; qStar_d[idx] = 1; return; } float denomReminder = 0; for (int i = 0; i < Zp; i++) denomReminder += exp(-lambda * i); int i = 0, j = 0; float q; float threshold = 0; float randnum = hiprand_uniform(&states[idx]); //random number if (colorsChecker[nodeCol]) { //if node color is used by neighbors do { float r = reminder * (exp(-lambda * j) / denomReminder); q = (probDistribution_d[i] + r) * (!colorsChecker[i]) + (epsilon) * (colorsChecker[i]); threshold += q; j += !colorsChecker[i]; i++; } while (threshold < randnum && i < nCol); } else { do { q = (1.0f - (nCol - 1) * epsilon) * (nodeCol == i) + (epsilon) * (nodeCol != i); threshold += q; i++; } while (threshold < randnum && i < nCol); } qStar_d[idx] = q; //save the probability of the color chosen starColoring_d[idx] = i - 1; #ifdef TABOO taboo_d[idx] = (starColoring_d[idx] == nodeCol) * tabooIteration; #endif // TABOO }
d6deacfe3830da2e286b12151eb3f8aa18377a83.cu
// This is a personal academic project. Dear PVS-Studio, please check it. // PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com #include "coloringMCMC.h" __global__ void ColoringMCMC_k::selectStarColoringDecrease(uint32_t nnodes, uint32_t * starColoring_d, float * qStar_d, col_sz nCol, uint32_t * coloring_d, node_sz * cumulDegs, node * neighs, bool * colorsChecker_d, uint32_t * taboo_d, uint32_t tabooIteration, float * probDistribution_d, curandState * states, float lambda, float epsilon, uint32_t * statsFreeColors_d) { uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= nnodes) return; #ifdef TABOO if (taboo_d[idx] > 0) { taboo_d[idx]--; qStar_d[idx] = (1.0f - (nCol - 1) * epsilon); //save the probability of the color chosen return; } #endif // TABOO uint32_t index = cumulDegs[idx]; //index of the node in neighs uint32_t nneighs = cumulDegs[idx + 1] - index; //number of neighbors uint32_t nodeCol = coloring_d[idx]; //node color bool * colorsChecker = &(colorsChecker_d[idx * nCol]); //array used to set if a color is used from the neighbors for (int i = 0; i < nneighs; i++) { colorsChecker[coloring_d[neighs[index + i]]] = 1; } float reminder = 0; uint32_t Zn = 0, Zp = nCol; //number of free colors (p) and occupied colors (n) for (int i = 0; i < nCol; i++) { Zn += colorsChecker[i] != 0; reminder += (colorsChecker[i] != 0) * (probDistribution_d[i] - epsilon); } Zp -= Zn; if (!Zp) { //manage exception of no free colors starColoring_d[idx] = nodeCol; qStar_d[idx] = 1; return; } float denomReminder = 0; for (int i = 0; i < Zp; i++) denomReminder += exp(-lambda * i); int i = 0, j = 0; float q; float threshold = 0; float randnum = curand_uniform(&states[idx]); //random number if (colorsChecker[nodeCol]) { //if node color is used by neighbors do { float r = reminder * (exp(-lambda * j) / denomReminder); q = (probDistribution_d[i] + r) * (!colorsChecker[i]) + (epsilon) * (colorsChecker[i]); threshold += q; j += !colorsChecker[i]; i++; } while (threshold < randnum && i < nCol); } else { do { q = (1.0f - (nCol - 1) * epsilon) * (nodeCol == i) + (epsilon) * (nodeCol != i); threshold += q; i++; } while (threshold < randnum && i < nCol); } qStar_d[idx] = q; //save the probability of the color chosen starColoring_d[idx] = i - 1; #ifdef TABOO taboo_d[idx] = (starColoring_d[idx] == nodeCol) * tabooIteration; #endif // TABOO }
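Besides the launch macro, the substantive change in this pair is the RNG API: curandState and curand_uniform become hiprandState_t and hiprand_uniform. The states passed into selectStarColoringDecrease must be seeded beforehand; a hedged sketch of that setup (this init kernel is an assumption, not part of the project shown):

__global__ void initRngStates(hiprandState_t *states, unsigned long long seed, uint32_t nnodes) {
    uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= nnodes) return;
    // same seed, per-thread subsequence, zero offset: an independent stream per node
    hiprand_init(seed, idx, 0, &states[idx]);
}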
67f59b1bb06d3e2996b22159cd5e099a6616b412.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <time.h> #define AND 0 #define OR 1 #define NAND 2 #define NOR 3 #define XOR 4 #define XNOR 5 __global__ void computeLogicGates(char* d_input, char* d_output, int size) { // calculate the index of the thread int index = threadIdx.x + blockIdx.x * blockDim.x; int input_index = index * 3; // if the index is inside the range of the array if (input_index < size) { int output; switch (d_input[input_index+2] - '0') { case AND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1; else output = 0; break; case OR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0; else output = 1; break; case NAND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0; else output = 1; break; case NOR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1; else output = 0; break; case XOR: if (d_input[input_index] == d_input[input_index+1]) output = 0; else output = 1; break; case XNOR: if (d_input[input_index] == d_input[input_index+1]) output = 1; else output = 0; break; } d_output[index] = output + '0'; } } int main(int argc, char* argv[]) { // check if necessary arguments are provided if (argc == 1) { return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!"); } else if (argc == 2) { return printf("Input file length and output file path are not provided!"); } else if (argc == 3) { return printf("Output file path is not provided!"); } char* input_file = argv[1]; int input_size = atoi(argv[2]); char* output_file = argv[3]; // read the input file FILE* input_fptr; input_fptr = fopen(input_file, "r"); if (!input_fptr) return printf("Error opening the input file!"); // read the file line by line and populate input_data array char line[10]; // allocate CUDA variables char* d_input; char* d_output; int input_array_size = input_size * 3 * sizeof(char); int output_array_size = input_size * sizeof(char); hipMallocManaged(&d_input, input_array_size); hipMallocManaged(&d_output, output_array_size); for (int i = 0; i < input_size; i++) { fgets(line, 9, input_fptr); d_input[i*3] = line[0]; d_input[i*3+1] = line[2]; d_input[i*3+2] = line[4]; } // close file pointer fclose(input_fptr); clock_t start = clock(); // call device kernel hipLaunchKernelGGL(( computeLogicGates), dim3(input_size), dim3(1), 0, 0, d_input, d_output, input_array_size); // synchronize threads hipDeviceSynchronize(); clock_t end = clock(); // write the results into the output file FILE* output_fptr; output_fptr = fopen(output_file, "w"); if(!output_fptr) return printf("Error opening output file!"); for (int i = 0; i < input_size; i++) { char data[3]; sprintf(data, "%c\n", d_output[i]); fputs(data, output_fptr); } // close file pointer fclose(output_fptr); // free up device memory hipFree(d_input); hipFree(d_output); // calculate execution time double runtime = (double) (end-start) / CLOCKS_PER_SEC; printf("Execution time: %f ms\n", runtime * 1000); return 0; }
67f59b1bb06d3e2996b22159cd5e099a6616b412.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <time.h> #define AND 0 #define OR 1 #define NAND 2 #define NOR 3 #define XOR 4 #define XNOR 5 __global__ void computeLogicGates(char* d_input, char* d_output, int size) { // calculate the index of the thread int index = threadIdx.x + blockIdx.x * blockDim.x; int input_index = index * 3; // if the index is inside the range of the array if (input_index < size) { int output; switch (d_input[input_index+2] - '0') { case AND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1; else output = 0; break; case OR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0; else output = 1; break; case NAND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0; else output = 1; break; case NOR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1; else output = 0; break; case XOR: if (d_input[input_index] == d_input[input_index+1]) output = 0; else output = 1; break; case XNOR: if (d_input[input_index] == d_input[input_index+1]) output = 1; else output = 0; break; } d_output[index] = output + '0'; } } int main(int argc, char* argv[]) { // check if necessary arguments are provided if (argc == 1) { return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!"); } else if (argc == 2) { return printf("Input file length and output file path are not provided!"); } else if (argc == 3) { return printf("Output file path is not provided!"); } char* input_file = argv[1]; int input_size = atoi(argv[2]); char* output_file = argv[3]; // read the input file FILE* input_fptr; input_fptr = fopen(input_file, "r"); if (!input_fptr) return printf("Error opening the input file!"); // read the file line by line and populate input_data array char line[10]; // allocate CUDA variables char* d_input; char* d_output; int input_array_size = input_size * 3 * sizeof(char); int output_array_size = input_size * sizeof(char); cudaMallocManaged(&d_input, input_array_size); cudaMallocManaged(&d_output, output_array_size); for (int i = 0; i < input_size; i++) { fgets(line, 9, input_fptr); d_input[i*3] = line[0]; d_input[i*3+1] = line[2]; d_input[i*3+2] = line[4]; } // close file pointer fclose(input_fptr); clock_t start = clock(); // call device kernel computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size); // synchronize threads cudaDeviceSynchronize(); clock_t end = clock(); // write the results into the output file FILE* output_fptr; output_fptr = fopen(output_file, "w"); if(!output_fptr) return printf("Error opening output file!"); for (int i = 0; i < input_size; i++) { char data[3]; sprintf(data, "%c\n", d_output[i]); fputs(data, output_fptr); } // close file pointer fclose(output_fptr); // free up device memory cudaFree(d_input); cudaFree(d_output); // calculate execution time double runtime = (double) (end-start) / CLOCKS_PER_SEC; printf("Execution time: %f ms\n", runtime * 1000); return 0; }
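Both versions launch computeLogicGates with input_size blocks of a single thread each (<<<input_size, 1>>>), which leaves most of every warp idle. Since the kernel already derives a global index and bounds-checks it, only the launch configuration needs to change; a sketch (BLOCK_SIZE is an assumed tuning constant, not from the file):

#define BLOCK_SIZE 256
// cover input_size gates with 256-thread blocks via ceil division
int num_blocks = (input_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
computeLogicGates<<<num_blocks, BLOCK_SIZE>>>(d_input, d_output, input_array_size);
cudaDeviceSynchronize();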
a06b5290817e2421d35359fcdcc50293cba1cb26.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

/* Tested blocks/threads:
   256/256
   256/1024
   128/1024
   512/1024
   768/1024 */
#define NUM_BLOCK 768
#define NUM_THREAD 1024
#define PI 3.14159265358979323846

/* Kernel function */
__global__ void cal_pi(double *mypi, int iter, double m, int nthreads, int nblocks) {
    int i;
    double ni;
    int idx = blockIdx.x * blockDim.x + threadIdx.x; // Sequential thread index across the blocks
    for (i = idx; i < iter; i += nthreads * nblocks) {
        ni = (i + 0.5) * m;
        mypi[idx] += 4.0 / (1.0 + ni * ni);
    }
}

int main(void) {
    int iteArr[3] = { 24000000, 48000000, 94000000 };
    //double iteArr[3] = { 24000000000, 48000000000, 94000000000 };
    //int iteArr[3] = { 1, 2, 4 };

    /* Setting up grid and block dimensions */
    dim3 dimGrid(NUM_BLOCK, 1, 1);
    dim3 dimBlock(NUM_THREAD, 1, 1);
    printf("REPORT # of blocks = %d, # of threads/block = %d\n", NUM_BLOCK, NUM_THREAD);

    /* Host and device variables (arrays); the buffer size is the same for
       every iteration count, so allocate once outside the loop */
    double *h_pi, *d_pi;
    size_t size = NUM_BLOCK * NUM_THREAD * sizeof(double);
    h_pi = (double *)malloc(size);     /* Allocate on host */
    hipMalloc((void **)&d_pi, size);   /* Allocate on device */

    for (int i = 0; i < 3; i++) {
        int currentIter = iteArr[i];
        double step = 1.0 / currentIter;
        double pi = 0; /* reset the running sum for each iteration count */

        /* Set d_pi to zero */
        hipMemset(d_pi, 0, size);

        /* Run kernel */
        hipLaunchKernelGGL(cal_pi, dimGrid, dimBlock, 0, 0, d_pi, currentIter, step, NUM_THREAD, NUM_BLOCK);

        /* Copy results from device to the host */
        hipMemcpy(h_pi, d_pi, size, hipMemcpyDeviceToHost);

        /* Finish pi on the host */
        for (int j = 0; j < NUM_THREAD * NUM_BLOCK; j++)
            pi += h_pi[j];
        pi *= step;
        printf("\tMyPI = %20.18f \n", pi);
        printf("\tMyPI - PI = %20.18f \n", pi - PI);
    }
    printf("\tCheck nvprof for more time estimation.\n\n");

    /* Clean up host and device variables */
    free(h_pi);
    hipFree(d_pi);
    return 0;
}
a06b5290817e2421d35359fcdcc50293cba1cb26.cu
#include <stdio.h>
#include <cuda.h>

/* Tested blocks/threads:
   256/256
   256/1024
   128/1024
   512/1024
   768/1024 */
#define NUM_BLOCK 768
#define NUM_THREAD 1024
#define PI 3.14159265358979323846

/* Kernel function */
__global__ void cal_pi(double *mypi, int iter, double m, int nthreads, int nblocks) {
    int i;
    double ni;
    int idx = blockIdx.x * blockDim.x + threadIdx.x; // Sequential thread index across the blocks
    for (i = idx; i < iter; i += nthreads * nblocks) {
        ni = (i + 0.5) * m;
        mypi[idx] += 4.0 / (1.0 + ni * ni);
    }
}

int main(void) {
    int iteArr[3] = { 24000000, 48000000, 94000000 };
    //double iteArr[3] = { 24000000000, 48000000000, 94000000000 };
    //int iteArr[3] = { 1, 2, 4 };

    /* Setting up grid and block dimensions */
    dim3 dimGrid(NUM_BLOCK, 1, 1);
    dim3 dimBlock(NUM_THREAD, 1, 1);
    printf("REPORT # of blocks = %d, # of threads/block = %d\n", NUM_BLOCK, NUM_THREAD);

    /* Host and device variables (arrays); the buffer size is the same for
       every iteration count, so allocate once outside the loop */
    double *h_pi, *d_pi;
    size_t size = NUM_BLOCK * NUM_THREAD * sizeof(double);
    h_pi = (double *)malloc(size);      /* Allocate on host */
    cudaMalloc((void **)&d_pi, size);   /* Allocate on device */

    for (int i = 0; i < 3; i++) {
        int currentIter = iteArr[i];
        double step = 1.0 / currentIter;
        double pi = 0; /* reset the running sum for each iteration count */

        /* Set d_pi to zero */
        cudaMemset(d_pi, 0, size);

        /* Run kernel */
        cal_pi<<<dimGrid, dimBlock>>>(d_pi, currentIter, step, NUM_THREAD, NUM_BLOCK);

        /* Copy results from device to the host */
        cudaMemcpy(h_pi, d_pi, size, cudaMemcpyDeviceToHost);

        /* Finish pi on the host */
        for (int j = 0; j < NUM_THREAD * NUM_BLOCK; j++)
            pi += h_pi[j];
        pi *= step;
        printf("\tMyPI = %20.18f \n", pi);
        printf("\tMyPI - PI = %20.18f \n", pi - PI);
    }
    printf("\tCheck nvprof for more time estimation.\n\n");

    /* Clean up host and device variables */
    free(h_pi);
    cudaFree(d_pi);
    return 0;
}
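Neither version of this pair times the kernel in-program (the comment defers to nvprof); CUDA events are the usual in-program alternative, since kernel launches are asynchronous with respect to the host. A minimal sketch reusing the names above (d_pi, currentIter, step, dimGrid, dimBlock come from the file; the event code is an added illustration, not part of either version):

cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);                      // enqueue a start marker on the default stream
cal_pi<<<dimGrid, dimBlock>>>(d_pi, currentIter, step, NUM_THREAD, NUM_BLOCK);
cudaEventRecord(evStop);                       // enqueue a stop marker after the kernel
cudaEventSynchronize(evStop);                  // block until the kernel and stop marker complete
float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);    // elapsed device time in milliseconds
printf("\tKernel time: %f ms\n", ms);
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);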
8d6d48d56a3df3ff424a4940b486e75687a1ee24.hip
// !!! This is a file automatically generated by hipify!!! /* BIMODAL v1 * David W. Pearson * September 28, 2017 * * This version of the code will implement some improvements to make the model better fit non-linear * features present in the data. The algorithm is effectively that of Gil-Marin 2012/2015. */ #include <iostream> #include <fstream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_vector_types.h> #include <gsl/gsl_spline.h> #include "include/harppi.h" #include "include/cspline.h" #include "include/dewiggle.h" #include "include/gpuerrchk.h" #include "include/mcmc.h" #include "include/make_spline.h" double get_k_nl(std::string P_lin_file); std::vector<double> get_Bk_NW(std::string file); void write_spline(std::string file, std::vector<double4> &spline); int main(int argc, char *argv[]) { // Use HARPPI hidden in an object file to parse parameters parameters p(argv[1]); // Generate cubic splines of the input BAO and NW power spectra std::vector<double4> Pk_spline = make_spline(p.gets("input_power")); std::vector<double> k; std::vector<double> n; get_dewiggled_slope(p.gets("in_pk_lin_file"), k, n); std::vector<double4> nk_spline = make_uniform_spline(k, n, 877, p.getd("k_min"), p.getd("k_max")); std::vector<double> Q_3; Q_3.reserve(n.size()); for (size_t i = 0; i < n.size(); ++i) { Q_3.push_back((4.0 - pow(2.0, n[i]))/(1.0 + pow(2.0, n[i] + 1.0))); } std::vector<double4> Q3_spline = make_uniform_spline(k, Q_3, 877, p.getd("k_min"), p.getd("k_max")); std::cout << "Pk_spline.size() = " << Pk_spline.size() << std::endl; std::cout << "nk_spline.size() = " << nk_spline.size() << std::endl; std::cout << "Q3_spline.size() = " << Q3_spline.size() << std::endl; write_spline("Pk_spline.dat", Pk_spline); write_spline("nk_spline.dat", nk_spline); write_spline("Q3_spline.dat", Q3_spline); // Copy the splines to the allocated GPU memory gpuErrchk(hipMemcpyToSymbol(d_Pk, Pk_spline.data(), 128*sizeof(double4))); gpuErrchk(hipMemcpyToSymbol(d_n, nk_spline.data(), 877*sizeof(double4))); gpuErrchk(hipMemcpyToSymbol(d_Q3, Q3_spline.data(), 877*sizeof(double4))); // Copy Gaussian Quadrature weights and evaluation point to GPU constant memory gpuErrchk(hipMemcpyToSymbol(d_w, &w_i[0], 32*sizeof(double))); gpuErrchk(hipMemcpyToSymbol(d_x, &x_i[0], 32*sizeof(double))); gpuErrchk(hipMemcpyToSymbol(d_aF, a_F, 9*sizeof(double))); gpuErrchk(hipMemcpyToSymbol(d_aG, a_G, 9*sizeof(double))); double k_nl = get_k_nl(p.gets("in_pk_lin_file")); gpuErrchk(hipMemcpyToSymbol(d_knl, &k_nl, sizeof(double))); std::vector<double> Bk_NW = get_Bk_NW(p.gets("Bk_NW_file")); // Declare a pointer for the integration workspace and allocate memory on the GPU double *d_Bk; double *d_BkNW; double4 *d_ks; gpuErrchk(hipMalloc((void **)&d_Bk, p.geti("num_data")*sizeof(double))); gpuErrchk(hipMalloc((void **)&d_BkNW, p.geti("num_data")*sizeof(double))); gpuErrchk(hipMalloc((void **)&d_ks, p.geti("num_data")*sizeof(double4))); gpuErrchk(hipMemcpy(d_BkNW, Bk_NW.data(), Bk_NW.size()*sizeof(double), hipMemcpyHostToDevice)); std::vector<double> start_params; std::vector<bool> limit_params; std::vector<double> var_i; std::vector<double> min; std::vector<double> max; for (int i = 0; i < p.geti("num_params"); ++i) { start_params.push_back(p.getd("start_params", i)); limit_params.push_back(p.getb("limit_params", i)); var_i.push_back(p.getd("vars", i)); min.push_back(p.getd("min_params", i)); max.push_back(p.getd("max_params", i)); } // Initialize bkmcmc object bkmcmc bk_fit(p.gets("data_file"), p.gets("cov_file"), start_params, 
var_i, d_ks, d_Bk, d_BkNW, p.geti("num_write")); // Check that the initialization worked bk_fit.check_init(); // Set any limits on the parameters bk_fit.set_param_limits(limit_params, min, max); // Run the MCMC chain bk_fit.run_chain(p.geti("num_draws"), p.geti("num_burn"), p.gets("reals_file"), d_ks, d_Bk, d_BkNW, p.getb("new_chain")); // Free device pointers gpuErrchk(hipFree(d_Bk)); gpuErrchk(hipFree(d_ks)); gpuErrchk(hipFree(d_BkNW)); return 0; } double get_k_nl(std::string P_lin_file) { double k_nl; std::vector<double> prod; std::vector<double> k; std::ifstream fin(P_lin_file); while (!fin.eof()) { double kt, P; fin >> kt >> P; if (!fin.eof()) { k.push_back(kt); prod.push_back(kt*kt*kt*P/(2.0*PI*PI)); } } fin.close(); gsl_spline *Getknl = gsl_spline_alloc(gsl_interp_cspline, k.size()); gsl_interp_accel *acc = gsl_interp_accel_alloc(); gsl_spline_init(Getknl, prod.data(), k.data(), k.size()); k_nl = gsl_spline_eval(Getknl, 1.0, acc); std::cout.precision(15); std::cout << "k_nl = " << k_nl << std::endl; gsl_spline_free(Getknl); gsl_interp_accel_free(acc); return k_nl; } std::vector<double> get_Bk_NW(std::string file) { std::vector<double> B_NW; std::ifstream fin(file); while(!fin.eof()) { double l, k1, k2, k3, B; fin >> l >> k1 >> k2 >> k3 >> B; if (!fin.eof()) { B_NW.push_back(B); } } fin.close(); return B_NW; } void write_spline(std::string file, std::vector<double4> &spline) { std::ofstream fout(file); fout.precision(15); for (size_t i = 0; i < spline.size(); ++i) { fout << spline[i].x << " " << spline[i].y << " " << spline[i].z << " " << spline[i].w << "\n"; } fout.close(); }
8d6d48d56a3df3ff424a4940b486e75687a1ee24.cu
/* BIMODAL v1 * David W. Pearson * September 28, 2017 * * This version of the code will implement some improvements to make the model better fit non-linear * features present in the data. The algorithm is effectively that of Gil-Marin 2012/2015. */ #include <iostream> #include <fstream> #include <vector> #include <cuda.h> #include <vector_types.h> #include <gsl/gsl_spline.h> #include "include/harppi.h" #include "include/cspline.h" #include "include/dewiggle.h" #include "include/gpuerrchk.h" #include "include/mcmc.h" #include "include/make_spline.h" double get_k_nl(std::string P_lin_file); std::vector<double> get_Bk_NW(std::string file); void write_spline(std::string file, std::vector<double4> &spline); int main(int argc, char *argv[]) { // Use HARPPI hidden in an object file to parse parameters parameters p(argv[1]); // Generate cubic splines of the input BAO and NW power spectra std::vector<double4> Pk_spline = make_spline(p.gets("input_power")); std::vector<double> k; std::vector<double> n; get_dewiggled_slope(p.gets("in_pk_lin_file"), k, n); std::vector<double4> nk_spline = make_uniform_spline(k, n, 877, p.getd("k_min"), p.getd("k_max")); std::vector<double> Q_3; Q_3.reserve(n.size()); for (size_t i = 0; i < n.size(); ++i) { Q_3.push_back((4.0 - pow(2.0, n[i]))/(1.0 + pow(2.0, n[i] + 1.0))); } std::vector<double4> Q3_spline = make_uniform_spline(k, Q_3, 877, p.getd("k_min"), p.getd("k_max")); std::cout << "Pk_spline.size() = " << Pk_spline.size() << std::endl; std::cout << "nk_spline.size() = " << nk_spline.size() << std::endl; std::cout << "Q3_spline.size() = " << Q3_spline.size() << std::endl; write_spline("Pk_spline.dat", Pk_spline); write_spline("nk_spline.dat", nk_spline); write_spline("Q3_spline.dat", Q3_spline); // Copy the splines to the allocated GPU memory gpuErrchk(cudaMemcpyToSymbol(d_Pk, Pk_spline.data(), 128*sizeof(double4))); gpuErrchk(cudaMemcpyToSymbol(d_n, nk_spline.data(), 877*sizeof(double4))); gpuErrchk(cudaMemcpyToSymbol(d_Q3, Q3_spline.data(), 877*sizeof(double4))); // Copy Gaussian Quadrature weights and evaluation point to GPU constant memory gpuErrchk(cudaMemcpyToSymbol(d_w, &w_i[0], 32*sizeof(double))); gpuErrchk(cudaMemcpyToSymbol(d_x, &x_i[0], 32*sizeof(double))); gpuErrchk(cudaMemcpyToSymbol(d_aF, a_F, 9*sizeof(double))); gpuErrchk(cudaMemcpyToSymbol(d_aG, a_G, 9*sizeof(double))); double k_nl = get_k_nl(p.gets("in_pk_lin_file")); gpuErrchk(cudaMemcpyToSymbol(d_knl, &k_nl, sizeof(double))); std::vector<double> Bk_NW = get_Bk_NW(p.gets("Bk_NW_file")); // Declare a pointer for the integration workspace and allocate memory on the GPU double *d_Bk; double *d_BkNW; double4 *d_ks; gpuErrchk(cudaMalloc((void **)&d_Bk, p.geti("num_data")*sizeof(double))); gpuErrchk(cudaMalloc((void **)&d_BkNW, p.geti("num_data")*sizeof(double))); gpuErrchk(cudaMalloc((void **)&d_ks, p.geti("num_data")*sizeof(double4))); gpuErrchk(cudaMemcpy(d_BkNW, Bk_NW.data(), Bk_NW.size()*sizeof(double), cudaMemcpyHostToDevice)); std::vector<double> start_params; std::vector<bool> limit_params; std::vector<double> var_i; std::vector<double> min; std::vector<double> max; for (int i = 0; i < p.geti("num_params"); ++i) { start_params.push_back(p.getd("start_params", i)); limit_params.push_back(p.getb("limit_params", i)); var_i.push_back(p.getd("vars", i)); min.push_back(p.getd("min_params", i)); max.push_back(p.getd("max_params", i)); } // Initialize bkmcmc object bkmcmc bk_fit(p.gets("data_file"), p.gets("cov_file"), start_params, var_i, d_ks, d_Bk, d_BkNW, p.geti("num_write")); // Check that the 
initialization worked bk_fit.check_init(); // Set any limits on the parameters bk_fit.set_param_limits(limit_params, min, max); // Run the MCMC chain bk_fit.run_chain(p.geti("num_draws"), p.geti("num_burn"), p.gets("reals_file"), d_ks, d_Bk, d_BkNW, p.getb("new_chain")); // Free device pointers gpuErrchk(cudaFree(d_Bk)); gpuErrchk(cudaFree(d_ks)); gpuErrchk(cudaFree(d_BkNW)); return 0; } double get_k_nl(std::string P_lin_file) { double k_nl; std::vector<double> prod; std::vector<double> k; std::ifstream fin(P_lin_file); while (!fin.eof()) { double kt, P; fin >> kt >> P; if (!fin.eof()) { k.push_back(kt); prod.push_back(kt*kt*kt*P/(2.0*PI*PI)); } } fin.close(); gsl_spline *Getknl = gsl_spline_alloc(gsl_interp_cspline, k.size()); gsl_interp_accel *acc = gsl_interp_accel_alloc(); gsl_spline_init(Getknl, prod.data(), k.data(), k.size()); k_nl = gsl_spline_eval(Getknl, 1.0, acc); std::cout.precision(15); std::cout << "k_nl = " << k_nl << std::endl; gsl_spline_free(Getknl); gsl_interp_accel_free(acc); return k_nl; } std::vector<double> get_Bk_NW(std::string file) { std::vector<double> B_NW; std::ifstream fin(file); while(!fin.eof()) { double l, k1, k2, k3, B; fin >> l >> k1 >> k2 >> k3 >> B; if (!fin.eof()) { B_NW.push_back(B); } } fin.close(); return B_NW; } void write_spline(std::string file, std::vector<double4> &spline) { std::ofstream fout(file); fout.precision(15); for (size_t i = 0; i < spline.size(); ++i) { fout << spline[i].x << " " << spline[i].y << " " << spline[i].z << " " << spline[i].w << "\n"; } fout.close(); }
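The gpuErrchk(cudaMemcpyToSymbol(...)) calls in this pair upload spline tables and Gaussian-quadrature data into __constant__ device symbols declared in the project's headers (d_Pk, d_w, d_knl, and so on). A self-contained sketch of the idiom with illustrative names (the symbol and size below are assumptions, not the project's declarations):

__constant__ double d_weights[32];   // constant-memory table, readable by every kernel in this TU

void uploadWeights(const double *h_weights) {
    // copy 32 doubles from host memory into the constant-memory symbol
    cudaMemcpyToSymbol(d_weights, h_weights, 32 * sizeof(double));
}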
38cc3e116bf0fe8597ea5f4dd6e7dea6a4300673.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************************************************************************************** This file is part of "SIFT-GPU-Thesis" project. * Copyright (C) 2015 {Christos Papazachariou} <{christospapazachariou@gmail.com}> * University of Piraeus- MSc program: Advanced Information Systems ("Embedded System Technologies" branch). * Partial or otherwise use of this project and/or source code is permitted only for educational and academic purposes. * Partial or otherwise use of this project and/or source code for commercial or militay applications is prohibited. ***********************************************************************************************************************/ #include "sift.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" extern "C" { #include "gpusift.h" #include "MYtimer.h" } extern "C" { void GPUbuildSS(unsigned char* octaves[MAX_O], //ptr to input image float* scaleSpace [MAX_O][MAX_S], //ptr to output image int O, int S, //octave no, blur level no int* octavesW, int* octavesH, //ptr to dims of output float sigmas[MAX_O][MAX_S]){ //input sigma for each scale //COMMON VARS int i=0; int j=0; int ii=0; int gR[MAX_O][MAX_S]; int gW[MAX_O][MAX_S]; //gauss coefficient window width float* coef1d[MAX_O][MAX_S]; float tmp; float norm = 0.0f; //CUDA VARS unsigned char* GPUoctaves[MAX_O]; float* GPUscaleSpace[MAX_O][MAX_S]; float* GPUcoef1d[MAX_O][MAX_S]; //TODO:CONSTANT MEMORY float* GPUintermediate[MAX_O][MAX_S]; unsigned char* GPUsrcPadded[MAX_O][MAX_S]; printf("GPUbuildSS call.\n"); //CPU MALLOC + CALCULATE PADDINGS printf(" calculating paddings and allocating RAM...\n"); logtime("GPUbuildSS-CPU padding+coef1d malloc loop start"); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale // derive gR and gW if(sigmas[i][j]*4.0f > 1.0f){ gR[i][j] = (int)(sigmas[i][j] * 4.0f); }else{ gR[i][j] = 1; } gW[i][j] = gR[i][j] * 2 + 1; //malloc the 1d gauss coefficient coef1d[i][j]=(float*)malloc(gW[i][j]*sizeof(float)); } } logtime("GPUbuildSS-CPU padding+coef1d malloc loop end"); /**************************************** * Compute Gaussian Coefficients ***************************************/ printf(" calculating Gaussian coefficients...\n"); logtime("GPUbuildSS-CPU coef1d calc loop start"); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale norm=0.0f; for(ii = 0; ii < gW[i][j]; ii++){ tmp = (float)((float)ii - (float)gR[i][j]) / sigmas[i][j]; coef1d[i][j][ii] = exp( -1.0f * tmp * tmp / 2.0f ); norm = norm + coef1d[i][j][ii]; } for(ii = 0; ii < gW[i][j]; ii++){ coef1d[i][j][ii] = coef1d[i][j][ii] / norm; } } } logtime("GPUbuildSS-CPU coef1d calc loop end"); //CUDA MALLOCS printf(" allocating GPU memory...\n"); showFreeBytes(); logtime("GPUbuildSS-hipMalloc loops start"); for (i=0;i<O;i++){ //for each octave if(hipMalloc(&GPUoctaves[i],sizeof(unsigned char)*octavesW[i]*octavesH[i])!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMalloc Error! GPUoctaves[%d]\tCUDA error: %s\n",i,hipGetErrorString(error)); hipFree(GPUoctaves[i]); } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (hipMalloc(&GPUscaleSpace[i][j],sizeof(float)*octavesH[i]*octavesW[i])!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMalloc Error! 
GPUscaleSpace[%d][%d]\tCUDA error: %s\n",i,j,hipGetErrorString(error)); hipFree(GPUscaleSpace[i][j]); } } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (hipMalloc(&GPUcoef1d[i][j],sizeof(float)*gW[i][j])!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMalloc Error! GPUcoef1d[%d][%d]\tCUDA error: %s\n",i,j,hipGetErrorString(error)); hipFree(GPUcoef1d[i][j]); } } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (hipMalloc(&GPUsrcPadded[i][j],sizeof(unsigned char)*(octavesH[i]+gR[i][j]*2)*(octavesW[i]+gR[i][j]*2))!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMalloc Error! GPUsrcPadded[%d][%d]\tCUDA error: %s\n",i,j,hipGetErrorString(error)); hipFree(GPUsrcPadded[i][j]); } } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (hipMalloc(&GPUintermediate[i][j],sizeof(float)*(octavesH[i]+gR[i][j]*2)*(octavesW[i]+gR[i][j]*2))!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMalloc Error! GPUintermediate[%d][%d]\tCUDA error: %s\n",i,j,hipGetErrorString(error)); hipFree(GPUintermediate[i][j]); } } } logtime("GPUbuildSS-hipMalloc loops end"); showFreeBytes(); //CUDA MEMCPYS TO DEV printf(" copying data to GPU memory...\n"); logtime("GPUbuildSS-hipMemcpyHostToDevice loops start"); for (i=0;i<O;i++){ if(hipMemcpy(GPUoctaves[i],octaves[i],sizeof(unsigned char)*octavesW[i]*octavesH[i],hipMemcpyHostToDevice)!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMemcpyHostToDevice Error! GPUoctaves[%d]\tCUDA error: %s\n",i,hipGetErrorString(error)); hipFree(GPUoctaves[i]); } } for (i=0;i<O;i++){ for (j=0;j<S;j++){ if(hipMemcpy(GPUcoef1d[i][j],coef1d[i][j],sizeof(float)*gW[i][j],hipMemcpyHostToDevice)!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMemcpyHostToDevice Error! 
GPUcoef1d[%d][%d]\tCUDA error: %s\n",i,j,hipGetErrorString(error)); hipFree(GPUoctaves[i]); } } } logtime("GPUbuildSS-hipMemcpyHostToDevice loops end"); //CUDA KERNEL CALLS printf(" calling padding GPU kernel...\n"); logtime("GPUbuildSS-Padding Kernel loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ const dim3 block(32,32); //reasonable block size (1024 threads/block) const dim3 grid((octavesW[i]+gR[i][j]*2+block.x-1)/block.x,(octavesH[i]+gR[i][j]*2+block.x-1)/block.y); //make sure at least 1x1 kernel hipLaunchKernelGGL(( GPUsrcPadding), dim3(grid), block, 0, 0, GPUoctaves[i], //input image (uchar*) GPUsrcPadded[i][j], //output image (uchar*) GPUintermediate[i][j], //output image (float*) gR[i][j], //gaussian coef radius octavesH[i],octavesW[i]); //input image dimensions } } logtime("GPUbuildSS-Padding Kernel loop end"); printf(" calling GPUGaussX kernel...\n"); logtime("GPUbuildSS-GaussX Kernel loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ const dim3 block(32,32); //reasonable block size (1024 threads/block) const dim3 grid((octavesW[i]+gR[i][j]*2+block.x-1)/block.x,(octavesH[i]+gR[i][j]*2+block.x-1)/block.y); //make sure at least 1x1 kernel hipLaunchKernelGGL(( GPUGaussX), dim3(grid), block, 0, 0, GPUsrcPadded[i][j], //input image (uchar*) GPUintermediate[i][j], //output image (float*) GPUcoef1d[i][j], //input gaussian coef 1D array gR[i][j], //gaussian coef radius octavesH[i],octavesW[i]); //input image dimensions } } logtime("GPUbuildSS-GaussX Kernel loop end"); printf(" calling GPUGaussY kernel...\n"); logtime("GPUbuildSS-GaussY Kernel loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ const dim3 block(32,32); //reasonable block size (1024 threads/block) const dim3 grid((octavesW[i]+gR[i][j]*2+block.x-1)/block.x,(octavesH[i]+gR[i][j]*2+block.x-1)/block.y); //make sure at least 1x1 kernel hipLaunchKernelGGL(( GPUGaussY), dim3(grid), block, 0, 0, GPUintermediate[i][j], //input image (float*) GPUscaleSpace[i][j], //output image(float*) GPUcoef1d[i][j], //input gaussian coef 1D array gR[i][j], //gaussian coef radius octavesH[i],octavesW[i]); //input image dimensions } } logtime("GPUbuildSS-GaussY Kernel loop end"); //CUDA MEMCPYS TO HOST printf(" copying data from GPU memory to RAM...\n"); showFreeBytes(); logtime("GPUbuildSS-hipMemcpyDeviceToHost loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ if(hipMemcpy(scaleSpace[i][j],GPUscaleSpace[i][j],sizeof(float)*octavesW[i]*octavesH[i],hipMemcpyDeviceToHost)!=hipSuccess){ hipError_t error = hipGetLastError(); printf ("hipMemcpyDeviceToHost Error! 
scaleSpace[%d][%d]\tCUDA error: %s\n",i,j,hipGetErrorString(error)); hipFree(GPUscaleSpace[i][j]); } } } logtime("GPUbuildSS-hipMemcpyDeviceToHost loop end"); showFreeBytes(); //CUDAFREES printf(" clearing GPU memory...\n"); hipFree(GPUscaleSpace); hipFree(GPUoctaves); hipFree(GPUcoef1d); hipFree(GPUsrcPadded); hipFree(GPUintermediate); showFreeBytes(); //CPU MEMORY CLEANUP printf(" clearing RAM...\n"); //free(coef1d); //gia kapoio logo crasharei edw printf("GPUbuildSS complete!\n"); } } extern "C" { __global__ void GPUsrcPadding(unsigned char* srcIn, //input image unsigned char* srcPaddedOut, //output image float* intermediate, //output image int gR, //input gaussian radius int inH, int inW){ //input image dims const int x=blockIdx.x*blockDim.x+threadIdx.x; const int y=blockIdx.y*blockDim.y+threadIdx.y; if ((x<inW+gR) && (y<inH+gR)){ //only valid threads if( x < gR || y > inH+gR-1 || y < gR || x > inW+gR-1 ){ // padding srcPadded srcPaddedOut[y*(inW+gR*2)+x] = 0; // padding intermediate intermediate[y*(inW+gR*2)+x] = 0.0f; }else{ // copy the src to srcPadded srcPaddedOut[y*(inW+gR*2)+x] = srcIn[(y-gR)*inW+(x-gR)]; } } } } extern "C" { __global__ void GPUGaussX(unsigned char* input, //input image (sourcePadded) float* output, //output image (intermediate) float* coef1d, //input gaussian coefficient 1d int gR, //input gaussian radius int inH, int inW){ //input image dims const int x=blockIdx.x*blockDim.x+threadIdx.x; const int y=blockIdx.y*blockDim.y+threadIdx.y; float accu = 0.0f; int ii=0; if (x>=gR && y>=gR && y<inH+gR && x<inW+gR){ //for loop starts at gR for i and j... for(ii = (-1 * gR); ii < (gR + 1); ii++){ // convolute with gaussian window // note: srcPadded is unsigned char accu += coef1d[ii + gR] * (float)input[y*(inW+gR*2)+x+ii]; } output[(y)*(inW+gR*2)+(x)] = accu; } } } extern "C" { __global__ void GPUGaussY(float* input, //input image (intermediate) float* output, //output image (blurred) float* coef1d, //input gaussian coefficient 1d int gR, //input gaussian radius int inH, int inW){ //input image dims float accu = 0.0f; int ii=0; const int x=blockIdx.x*blockDim.x+threadIdx.x; const int y=blockIdx.y*blockDim.y+threadIdx.y; if (x>=gR && y>=gR && y<inH+gR && x<inW+gR){ //for loop starts at gR for i and j... for(ii = (-1 * gR); ii < (gR + 1); ii++){ // convolute with gaussian window accu += coef1d[ii + gR] * (float) input[(y+ii)*(inW+gR*2)+x]; } output[(y-gR)*inW+(x-gR)] = accu; } } }
38cc3e116bf0fe8597ea5f4dd6e7dea6a4300673.cu
/*********************************************************************************************************************** This file is part of "SIFT-GPU-Thesis" project. * Copyright (C) 2015 {Christos Papazachariou} <{christospapazachariou@gmail.com}> * University of Piraeus- MSc program: Advanced Information Systems ("Embedded System Technologies" branch). * Partial or otherwise use of this project and/or source code is permitted only for educational and academic purposes. * Partial or otherwise use of this project and/or source code for commercial or militay applications is prohibited. ***********************************************************************************************************************/ #include "sift.h" #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" extern "C" { #include "gpusift.h" #include "MYtimer.h" } extern "C" { void GPUbuildSS(unsigned char* octaves[MAX_O], //ptr to input image float* scaleSpace [MAX_O][MAX_S], //ptr to output image int O, int S, //octave no, blur level no int* octavesW, int* octavesH, //ptr to dims of output float sigmas[MAX_O][MAX_S]){ //input sigma for each scale //COMMON VARS int i=0; int j=0; int ii=0; int gR[MAX_O][MAX_S]; int gW[MAX_O][MAX_S]; //gauss coefficient window width float* coef1d[MAX_O][MAX_S]; float tmp; float norm = 0.0f; //CUDA VARS unsigned char* GPUoctaves[MAX_O]; float* GPUscaleSpace[MAX_O][MAX_S]; float* GPUcoef1d[MAX_O][MAX_S]; //TODO:CONSTANT MEMORY float* GPUintermediate[MAX_O][MAX_S]; unsigned char* GPUsrcPadded[MAX_O][MAX_S]; printf("GPUbuildSS call.\n"); //CPU MALLOC + CALCULATE PADDINGS printf(" calculating paddings and allocating RAM...\n"); logtime("GPUbuildSS-CPU padding+coef1d malloc loop start"); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale // derive gR and gW if(sigmas[i][j]*4.0f > 1.0f){ gR[i][j] = (int)(sigmas[i][j] * 4.0f); }else{ gR[i][j] = 1; } gW[i][j] = gR[i][j] * 2 + 1; //malloc the 1d gauss coefficient coef1d[i][j]=(float*)malloc(gW[i][j]*sizeof(float)); } } logtime("GPUbuildSS-CPU padding+coef1d malloc loop end"); /**************************************** * Compute Gaussian Coefficients ***************************************/ printf(" calculating Gaussian coefficients...\n"); logtime("GPUbuildSS-CPU coef1d calc loop start"); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale norm=0.0f; for(ii = 0; ii < gW[i][j]; ii++){ tmp = (float)((float)ii - (float)gR[i][j]) / sigmas[i][j]; coef1d[i][j][ii] = exp( -1.0f * tmp * tmp / 2.0f ); norm = norm + coef1d[i][j][ii]; } for(ii = 0; ii < gW[i][j]; ii++){ coef1d[i][j][ii] = coef1d[i][j][ii] / norm; } } } logtime("GPUbuildSS-CPU coef1d calc loop end"); //CUDA MALLOCS printf(" allocating GPU memory...\n"); showFreeBytes(); logtime("GPUbuildSS-cudaMalloc loops start"); for (i=0;i<O;i++){ //for each octave if(cudaMalloc(&GPUoctaves[i],sizeof(unsigned char)*octavesW[i]*octavesH[i])!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMalloc Error! GPUoctaves[%d]\tCUDA error: %s\n",i,cudaGetErrorString(error)); cudaFree(GPUoctaves[i]); } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (cudaMalloc(&GPUscaleSpace[i][j],sizeof(float)*octavesH[i]*octavesW[i])!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMalloc Error! 
GPUscaleSpace[%d][%d]\tCUDA error: %s\n",i,j,cudaGetErrorString(error)); cudaFree(GPUscaleSpace[i][j]); } } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (cudaMalloc(&GPUcoef1d[i][j],sizeof(float)*gW[i][j])!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMalloc Error! GPUcoef1d[%d][%d]\tCUDA error: %s\n",i,j,cudaGetErrorString(error)); cudaFree(GPUcoef1d[i][j]); } } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (cudaMalloc(&GPUsrcPadded[i][j],sizeof(unsigned char)*(octavesH[i]+gR[i][j]*2)*(octavesW[i]+gR[i][j]*2))!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMalloc Error! GPUsrcPadded[%d][%d]\tCUDA error: %s\n",i,j,cudaGetErrorString(error)); cudaFree(GPUsrcPadded[i][j]); } } } showFreeBytes(); for (i=0;i<O;i++){ //for each octave for (j=0;j<S;j++){ //for each scale if (cudaMalloc(&GPUintermediate[i][j],sizeof(float)*(octavesH[i]+gR[i][j]*2)*(octavesW[i]+gR[i][j]*2))!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMalloc Error! GPUintermediate[%d][%d]\tCUDA error: %s\n",i,j,cudaGetErrorString(error)); cudaFree(GPUintermediate[i][j]); } } } logtime("GPUbuildSS-cudaMalloc loops end"); showFreeBytes(); //CUDA MEMCPYS TO DEV printf(" copying data to GPU memory...\n"); logtime("GPUbuildSS-cudaMemcpyHostToDevice loops start"); for (i=0;i<O;i++){ if(cudaMemcpy(GPUoctaves[i],octaves[i],sizeof(unsigned char)*octavesW[i]*octavesH[i],cudaMemcpyHostToDevice)!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMemcpyHostToDevice Error! GPUoctaves[%d]\tCUDA error: %s\n",i,cudaGetErrorString(error)); cudaFree(GPUoctaves[i]); } } for (i=0;i<O;i++){ for (j=0;j<S;j++){ if(cudaMemcpy(GPUcoef1d[i][j],coef1d[i][j],sizeof(float)*gW[i][j],cudaMemcpyHostToDevice)!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMemcpyHostToDevice Error! 
GPUcoef1d[%d][%d]\tCUDA error: %s\n",i,j,cudaGetErrorString(error)); cudaFree(GPUcoef1d[i][j]); } } } logtime("GPUbuildSS-cudaMemcpyHostToDevice loops end"); //CUDA KERNEL CALLS printf(" calling padding GPU kernel...\n"); logtime("GPUbuildSS-Padding Kernel loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ const dim3 block(32,32); //reasonable block size (1024 threads/block) const dim3 grid((octavesW[i]+gR[i][j]*2+block.x-1)/block.x,(octavesH[i]+gR[i][j]*2+block.y-1)/block.y); //round up so the whole padded image is covered GPUsrcPadding<<<grid, block>>> (GPUoctaves[i], //input image (uchar*) GPUsrcPadded[i][j], //output image (uchar*) GPUintermediate[i][j], //output image (float*) gR[i][j], //gaussian coef radius octavesH[i],octavesW[i]); //input image dimensions } } logtime("GPUbuildSS-Padding Kernel loop end"); printf(" calling GPUGaussX kernel...\n"); logtime("GPUbuildSS-GaussX Kernel loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ const dim3 block(32,32); //reasonable block size (1024 threads/block) const dim3 grid((octavesW[i]+gR[i][j]*2+block.x-1)/block.x,(octavesH[i]+gR[i][j]*2+block.y-1)/block.y); //round up so the whole padded image is covered GPUGaussX<<<grid, block>>> (GPUsrcPadded[i][j], //input image (uchar*) GPUintermediate[i][j], //output image (float*) GPUcoef1d[i][j], //input gaussian coef 1D array gR[i][j], //gaussian coef radius octavesH[i],octavesW[i]); //input image dimensions } } logtime("GPUbuildSS-GaussX Kernel loop end"); printf(" calling GPUGaussY kernel...\n"); logtime("GPUbuildSS-GaussY Kernel loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ const dim3 block(32,32); //reasonable block size (1024 threads/block) const dim3 grid((octavesW[i]+gR[i][j]*2+block.x-1)/block.x,(octavesH[i]+gR[i][j]*2+block.y-1)/block.y); //round up so the whole padded image is covered GPUGaussY<<<grid, block>>> (GPUintermediate[i][j], //input image (float*) GPUscaleSpace[i][j], //output image (float*) GPUcoef1d[i][j], //input gaussian coef 1D array gR[i][j], //gaussian coef radius octavesH[i],octavesW[i]); //input image dimensions } } logtime("GPUbuildSS-GaussY Kernel loop end"); //CUDA MEMCPYS TO HOST printf(" copying data from GPU memory to RAM...\n"); showFreeBytes(); logtime("GPUbuildSS-cudaMemcpyDeviceToHost loop start"); for (i=0;i<O;i++){ for (j=0;j<S;j++){ if(cudaMemcpy(scaleSpace[i][j],GPUscaleSpace[i][j],sizeof(float)*octavesW[i]*octavesH[i],cudaMemcpyDeviceToHost)!=cudaSuccess){ cudaError_t error = cudaGetLastError(); printf ("cudaMemcpyDeviceToHost Error! 
scaleSpace[%d][%d]\tCUDA error: %s\n",i,j,cudaGetErrorString(error)); cudaFree(GPUscaleSpace[i][j]); } } } logtime("GPUbuildSS-cudaMemcpyDeviceToHost loop end"); showFreeBytes(); //CUDAFREES: free each device allocation (passing the host pointer arrays themselves to cudaFree was invalid) printf(" clearing GPU memory...\n"); for (i=0;i<O;i++){ cudaFree(GPUoctaves[i]); for (j=0;j<S;j++){ cudaFree(GPUscaleSpace[i][j]); cudaFree(GPUcoef1d[i][j]); cudaFree(GPUsrcPadded[i][j]); cudaFree(GPUintermediate[i][j]); } } showFreeBytes(); //CPU MEMORY CLEANUP printf(" clearing RAM...\n"); //free(coef1d) crashed here for a reason: coef1d is a stack array, so only the per-scale heap buffers may be freed for (i=0;i<O;i++){ for (j=0;j<S;j++){ free(coef1d[i][j]); } } printf("GPUbuildSS complete!\n"); } } extern "C" { __global__ void GPUsrcPadding(unsigned char* srcIn, //input image unsigned char* srcPaddedOut, //output image float* intermediate, //output image int gR, //input gaussian radius int inH, int inW){ //input image dims const int x=blockIdx.x*blockDim.x+threadIdx.x; const int y=blockIdx.y*blockDim.y+threadIdx.y; if ((x<inW+gR*2) && (y<inH+gR*2)){ //only valid threads: cover the full padded image so the right/bottom halo is zeroed as well if( x < gR || y > inH+gR-1 || y < gR || x > inW+gR-1 ){ // padding srcPadded srcPaddedOut[y*(inW+gR*2)+x] = 0; // padding intermediate intermediate[y*(inW+gR*2)+x] = 0.0f; }else{ // copy the src to srcPadded srcPaddedOut[y*(inW+gR*2)+x] = srcIn[(y-gR)*inW+(x-gR)]; } } } } extern "C" { __global__ void GPUGaussX(unsigned char* input, //input image (sourcePadded) float* output, //output image (intermediate) float* coef1d, //input gaussian coefficient 1d int gR, //input gaussian radius int inH, int inW){ //input image dims const int x=blockIdx.x*blockDim.x+threadIdx.x; const int y=blockIdx.y*blockDim.y+threadIdx.y; float accu = 0.0f; int ii=0; if (x>=gR && y>=gR && y<inH+gR && x<inW+gR){ //for loop starts at gR for i and j... for(ii = (-1 * gR); ii < (gR + 1); ii++){ // convolve with gaussian window // note: srcPadded is unsigned char accu += coef1d[ii + gR] * (float)input[y*(inW+gR*2)+x+ii]; } output[(y)*(inW+gR*2)+(x)] = accu; } } } extern "C" { __global__ void GPUGaussY(float* input, //input image (intermediate) float* output, //output image (blurred) float* coef1d, //input gaussian coefficient 1d int gR, //input gaussian radius int inH, int inW){ //input image dims float accu = 0.0f; int ii=0; const int x=blockIdx.x*blockDim.x+threadIdx.x; const int y=blockIdx.y*blockDim.y+threadIdx.y; if (x>=gR && y>=gR && y<inH+gR && x<inW+gR){ //for loop starts at gR for i and j... for(ii = (-1 * gR); ii < (gR + 1); ii++){ // convolve with gaussian window accu += coef1d[ii + gR] * (float) input[(y+ii)*(inW+gR*2)+x]; } output[(y-gR)*inW+(x-gR)] = accu; } } }
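A minimal sketch (editor's assumption, not part of the SIFT-GPU-Thesis sources): the repeated allocate/check/print pattern in GPUbuildSS could be factored into a single helper; CHECK_CUDA is a hypothetical name.

#include <stdio.h>
#include <cuda_runtime.h>

// Hypothetical helper: report any CUDA error with file/line context
// instead of repeating the if/printf/cudaFree block for every buffer.
#define CHECK_CUDA(call)                                            \
    do {                                                            \
        cudaError_t err_ = (call);                                  \
        if (err_ != cudaSuccess) {                                  \
            printf("CUDA error at %s:%d: %s\n",                     \
                   __FILE__, __LINE__, cudaGetErrorString(err_));   \
        }                                                           \
    } while (0)

// Usage, e.g.: CHECK_CUDA(cudaMalloc(&GPUoctaves[i], nBytes));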
ba3cda6ec29266dd81de169d3bfdab2db36e70d5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #define DATATYPE int #define SMEMSIZE 512 #define REP 128 texture <int,1,hipReadModeElementType> texref1; texture <int,1,hipReadModeElementType> texref2; __global__ void texture_order_1(double *time,DATATYPE *out,int its) { DATATYPE p,q=threadIdx.x; double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=tex1Dfetch(texref1,q); q=tex1Dfetch(texref2,p); } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx) { int its=30; DATATYPE *d_in1,*d_in2; hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); hipBindTexture(NULL,texref1,d_in1,sizeof(DATATYPE)*SMEMSIZE); hipBindTexture(NULL,texref2,d_in2,sizeof(DATATYPE)*SMEMSIZE); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); hipMalloc((void**)&d_time,sizeof(double)*blocks*threads); hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); hipLaunchKernelGGL(( texture_order_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_out,its); hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt); hipUnbindTexture(texref1); hipUnbindTexture(texref2); hipFree(d_time); hipFree(d_out); hipFree(d_in1); hipFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } void init_disordered_32(DATATYPE *a,int n) { DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<n;i+=32) { for (int j=0;j<32;j++) { int jj=rand()%(32-j); a[i+j]=p[jj]; for (int k=jj;k<(32-j);k++) { p[k]=p[k+1]; } } for (int j=0;j<32;j++) { p[j]=a[i+j]; a[i+j]+=i; } } } void init_disordered_512(DATATYPE *a,int n) { const int nn=n/32; DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn); DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n); init_order(q,nn); for (int i=0;i<n;i+=nn) { for (int j=0;j<nn;j++) { int jj=rand()%(nn-j); b[i+j]=q[jj]; for (int k=jj;k<(nn-j);k++) { q[k]=q[k+1]; } } for (int j=0;j<nn;j++) { q[j]=b[i+j]; } } DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<32;i++) { for (int j=0;j<nn;j++) { a[j*32+i]=b[i*nn+j]*32+p[i]; } } free(q); free(b); } int main() { DATATYPE *h_in1, *h_in2, *h_in3; h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); init_order(h_in1, SMEMSIZE); init_disordered_32(h_in2, SMEMSIZE); init_disordered_512(h_in3, SMEMSIZE); printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); int blocks = 1; for (int j = 0; j <= 512; j += 32) { int threads = (j == 0 ? 
1 : j); main_test(blocks, threads, h_in1, h_in1, 1); main_test(blocks, threads, h_in2, h_in2, 2); main_test(blocks, threads, h_in3, h_in3, 3); } free(h_in1); free(h_in2); free(h_in3); return 0; }
ba3cda6ec29266dd81de169d3bfdab2db36e70d5.cu
#include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #define DATATYPE int #define SMEMSIZE 512 #define REP 128 texture <int,1,cudaReadModeElementType> texref1; texture <int,1,cudaReadModeElementType> texref2; __global__ void texture_order_1(double *time,DATATYPE *out,int its) { DATATYPE p,q=threadIdx.x; double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=tex1Dfetch(texref1,q); q=tex1Dfetch(texref2,p); } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx) { int its=30; DATATYPE *d_in1,*d_in2; cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); cudaBindTexture(NULL,texref1,d_in1,sizeof(DATATYPE)*SMEMSIZE); cudaBindTexture(NULL,texref2,d_in2,sizeof(DATATYPE)*SMEMSIZE); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads); cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); texture_order_1<<<blocks,threads>>>(d_time,d_out,its); cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt); cudaUnbindTexture(texref1); cudaUnbindTexture(texref2); cudaFree(d_time); cudaFree(d_out); cudaFree(d_in1); cudaFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } void init_disordered_32(DATATYPE *a,int n) { DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<n;i+=32) { for (int j=0;j<32;j++) { int jj=rand()%(32-j); a[i+j]=p[jj]; for (int k=jj;k<(32-j);k++) { p[k]=p[k+1]; } } for (int j=0;j<32;j++) { p[j]=a[i+j]; a[i+j]+=i; } } } void init_disordered_512(DATATYPE *a,int n) { const int nn=n/32; DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn); DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n); init_order(q,nn); for (int i=0;i<n;i+=nn) { for (int j=0;j<nn;j++) { int jj=rand()%(nn-j); b[i+j]=q[jj]; for (int k=jj;k<(nn-j);k++) { q[k]=q[k+1]; } } for (int j=0;j<nn;j++) { q[j]=b[i+j]; } } DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<32;i++) { for (int j=0;j<nn;j++) { a[j*32+i]=b[i*nn+j]*32+p[i]; } } free(q); free(b); } int main() { DATATYPE *h_in1, *h_in2, *h_in3; h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE); init_order(h_in1, SMEMSIZE); init_disordered_32(h_in2, SMEMSIZE); init_disordered_512(h_in3, SMEMSIZE); printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); int blocks = 1; for (int j = 0; j <= 512; j += 32) { int threads = (j == 0 ? 
1 : j); main_test(blocks, threads, h_in1, h_in1, 1); main_test(blocks, threads, h_in2, h_in2, 2); main_test(blocks, threads, h_in3, h_in3, 3); } free(h_in1); free(h_in2); free(h_in3); return 0; }
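Both versions of this benchmark rely on the legacy texture<int,1,...> reference API, which was deprecated in CUDA 11 and removed in CUDA 12. A minimal sketch, assuming the same linear int buffers, of the equivalent binding with texture objects (the helper name is illustrative, not part of the benchmark):

#include <string.h>
#include <cuda_runtime.h>

// Editor's sketch: build a cudaTextureObject_t over a linear device buffer,
// equivalent to the cudaBindTexture(NULL, texref1, d_in1, ...) call above.
static cudaTextureObject_t makeLinearIntTexture(int *d_buf, size_t nElems)
{
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType                = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr      = d_buf;
    resDesc.res.linear.desc        = cudaCreateChannelDesc<int>();
    resDesc.res.linear.sizeInBytes = nElems * sizeof(int);

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}
// Kernels then take the object as a parameter and fetch with
// tex1Dfetch<int>(tex, q) instead of tex1Dfetch(texref1, q).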
648d71b27dda7b0784f958c69a1a37d1cc0820af.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "add.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *c = NULL; hipMalloc(&c, XSIZE*YSIZE); float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int values = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b,values); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b,values); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( add), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b,values); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
648d71b27dda7b0784f958c69a1a37d1cc0820af.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int values = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); add<<<gridBlock,threadBlock>>>(c,a,b,values); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { add<<<gridBlock,threadBlock>>>(c,a,b,values); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { add<<<gridBlock,threadBlock>>>(c,a,b,values); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
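The harness above times 1000 launches with std::chrono::steady_clock but never synchronizes after the timed loop, so it mostly measures host-side enqueue cost rather than kernel execution. A minimal sketch (editor's assumption, reusing the names declared in the file) of the same measurement with CUDA events, which time on the device:

// Editor's sketch: device-side timing of the same batch of launches.
// add, gridBlock, threadBlock, c, a, b, values are the names used above.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++)
    add<<<gridBlock, threadBlock>>>(c, a, b, values);
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);               // wait for all launches to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop); // elapsed GPU time in milliseconds
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);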
2f76e4c2a9e786840711f99947b1e3b4a154ce3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma (iuriish@yahoo.com) // #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const T delta, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; T h, s, v; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v); h += delta ; if(h > 1) h -= 1; else if(h < 0) h += 1; hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) { hipLaunchKernelGGL(( adjustHueCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC); } //////////////////////////////////////////////////////////////////////// void adjustHue(sd::LaunchContext* context, const NDArray *input, const NDArray* deltaScalarArr, NDArray *output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "adjustHue"); NDArray::prepareSpecialUse({output}, {input, deltaScalarArr}); BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, 
context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, deltaScalarArr}); manager.synchronize(); } /* template <typename T> static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto bIn = reinterpret_cast<T*>(xBuffer); auto bOut = reinterpret_cast<T*>(zBuffer); static const int kChannelRange = 6; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto i = bIn + e * numChannels; auto o = bOut + e * numChannels; T h, v_min, v_max; helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0.) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2); } } template <typename T> static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; static const int kChannelRange = 6; auto bufferR = reinterpret_cast<T *>(xBuffer) + xOffsets[0]; auto bufferG = reinterpret_cast<T *>(xBuffer) + xOffsets[1]; auto bufferB = reinterpret_cast<T *>(xBuffer) + xOffsets[2]; auto outputR = reinterpret_cast<T *>(zBuffer) + zOffsets[0]; auto outputG = reinterpret_cast<T *>(zBuffer) + zOffsets[1]; auto outputB = reinterpret_cast<T *>(zBuffer) + zOffsets[2]; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo); auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo); auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo); auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo); auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo); auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo); T h, v_min, v_max; helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo); } } template <typename T> static void _adjust_hue_single(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->special(), tuples, delta); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(array->shapeInfo(), {1, 2}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {1, 2}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } template <typename T> static void _adjust_hue_batch(sd::LaunchContext * context, NDArray *array, NDArray 
*output, float delta, bool isNHWC) { auto xType = array->dataType(); // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(array->shapeInfo(), {0, 2, 3}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {0, 2, 3}); auto tadLength = shape::length(packX.primary()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platform(), packZ.platform(), tadLength, tuples, delta); } } void _adjust_hue(sd::LaunchContext * context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) { auto xType = array->dataType(); float d = delta->e<float>(0); if (array->rankOf() == 4) { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES); } else { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES); } } */ } } }
2f76e4c2a9e786840711f99947b1e3b4a154ce3a.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // @author Yurii Shyrma (iuriish@yahoo.com) // #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const T delta, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; T h, s, v; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v); h += delta ; if(h > 1) h -= 1; else if(h < 0) h += 1; hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) { adjustHueCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC); } //////////////////////////////////////////////////////////////////////// void adjustHue(sd::LaunchContext* context, const NDArray *input, const NDArray* deltaScalarArr, NDArray *output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "adjustHue"); NDArray::prepareSpecialUse({output}, {input, deltaScalarArr}); BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), 
output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, deltaScalarArr}); manager.synchronize(); } /* template <typename T> static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto bIn = reinterpret_cast<T*>(xBuffer); auto bOut = reinterpret_cast<T*>(zBuffer); static const int kChannelRange = 6; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto i = bIn + e * numChannels; auto o = bOut + e * numChannels; T h, v_min, v_max; helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0.) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2); } } template <typename T> static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; static const int kChannelRange = 6; auto bufferR = reinterpret_cast<T *>(xBuffer) + xOffsets[0]; auto bufferG = reinterpret_cast<T *>(xBuffer) + xOffsets[1]; auto bufferB = reinterpret_cast<T *>(xBuffer) + xOffsets[2]; auto outputR = reinterpret_cast<T *>(zBuffer) + zOffsets[0]; auto outputG = reinterpret_cast<T *>(zBuffer) + zOffsets[1]; auto outputB = reinterpret_cast<T *>(zBuffer) + zOffsets[2]; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo); auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo); auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo); auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo); auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo); auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo); T h, v_min, v_max; helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo); } } template <typename T> static void _adjust_hue_single(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->special(), tuples, delta); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(array->shapeInfo(), {1, 2}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {1, 2}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } template <typename T> static void _adjust_hue_batch(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { auto xType = array->dataType(); // numChannels is always 3 auto tuples = array->lengthOf() / 
3; if (isNHWC) { // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(array->shapeInfo(), {0, 2, 3}); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {0, 2, 3}); auto tadLength = shape::length(packX.primary()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platform(), packZ.platform(), tadLength, tuples, delta); } } void _adjust_hue(sd::LaunchContext * context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) { auto xType = array->dataType(); float d = delta->e<float>(0); if (array->rankOf() == 4) { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES); } else { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES); } } */ } } }
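For reference, the per-pixel hue update in adjustHueCuda is simple enough to check against a host-side version; a minimal sketch (hypothetical helper, not part of the library):

// Editor's sketch: host reference for the hue shift in adjustHueCuda,
// with hue kept in [0, 1] and wrapped once after the shift.
static inline float shiftHueHost(float h, float delta)
{
    h += delta;
    if (h > 1.0f)      h -= 1.0f;   // wrap above 1
    else if (h < 0.0f) h += 1.0f;   // wrap below 0
    return h;
}
// e.g. shiftHueHost(0.9f, 0.2f) is approximately 0.1f, matching the kernel.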
b4df84ebbb3c2ea62a86b77bac2f410fbf8cabc3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This software is Copyright (c) 2011 Lukas Odzioba <lukas dot odzioba at gmail dot com> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #include <stdio.h> #include "../cuda_cryptsha512.h" #include "cuda_common.cuh" __constant__ uint64_t k[] = { 0x428a2f98d728ae22LL, 0x7137449123ef65cdLL, 0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL, 0x3956c25bf348b538LL, 0x59f111f1b605d019LL, 0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL, 0xd807aa98a3030242LL, 0x12835b0145706fbeLL, 0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL, 0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL, 0x9bdc06a725c71235LL, 0xc19bf174cf692694LL, 0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL, 0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL, 0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL, 0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL, 0x983e5152ee66dfabLL, 0xa831c66d2db43210LL, 0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL, 0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL, 0x06ca6351e003826fLL, 0x142929670a0e6e70LL, 0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL, 0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL, 0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL, 0x81c2c92e47edaee6LL, 0x92722c851482353bLL, 0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL, 0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL, 0xd192e819d6ef5218LL, 0xd69906245565a910LL, 0xf40e35855771202aLL, 0x106aa07032bbd1b8LL, 0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL, 0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL, 0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL, 0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL, 0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL, 0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL, 0x90befffa23631e28LL, 0xa4506cebde82bde9LL, 0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL, 0xca273eceea26619cLL, 0xd186b8c721c0c207LL, 0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL, 0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL, 0x113f9804bef90daeLL, 0x1b710b35131c471bLL, 0x28db77f523047d84LL, 0x32caab7b40c72493LL, 0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL, 0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL, 0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL, }; __constant__ crypt_sha512_salt cuda_salt[1]; extern "C" void sha512_crypt_gpu(crypt_sha512_password * inbuffer, crypt_sha512_hash * outbuffer, crypt_sha512_salt * salt); __device__ void init_ctx(sha512_ctx * ctx) { ctx->H[0] = 0x6a09e667f3bcc908LL; ctx->H[1] = 0xbb67ae8584caa73bLL; ctx->H[2] = 0x3c6ef372fe94f82bLL; ctx->H[3] = 0xa54ff53a5f1d36f1LL; ctx->H[4] = 0x510e527fade682d1LL; ctx->H[5] = 0x9b05688c2b3e6c1fLL; ctx->H[6] = 0x1f83d9abfb41bd6bLL; ctx->H[7] = 0x5be0cd19137e2179LL; ctx->total = 0; ctx->buflen = 0; } __device__ void insert_to_buffer(sha512_ctx * ctx, const uint8_t * string, uint8_t len) { uint8_t *d = &ctx->buffer[ctx->buflen]; memcpy(d,string,len); ctx->buflen += len; } __device__ void sha512_block(sha512_ctx * ctx) { int i; uint64_t a = ctx->H[0]; uint64_t b = ctx->H[1]; uint64_t c = ctx->H[2]; uint64_t d = ctx->H[3]; uint64_t e = ctx->H[4]; uint64_t f = ctx->H[5]; uint64_t g = ctx->H[6]; uint64_t h = ctx->H[7]; uint64_t w[16]; uint64_t *data = (uint64_t *) ctx->buffer; //#pragma unroll 16 for (i = 0; i < 16; i++) w[i] = SWAP64(data[i]); uint64_t t1, t2; //#pragma unroll 16 for (i = 0; i < 16; i++) { t1 = k[i] + w[i] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } for (i = 16; i < 80; i++) { w[i & 15] 
=sigma1(w[(i - 2) & 15]) + sigma0(w[(i - 15) & 15]) + w[(i -16) & 15] + w[(i - 7) & 15]; t1 = k[i] + w[i & 15] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } ctx->H[0] += a; ctx->H[1] += b; ctx->H[2] += c; ctx->H[3] += d; ctx->H[4] += e; ctx->H[5] += f; ctx->H[6] += g; ctx->H[7] += h; } __device__ void ctx_append_1(sha512_ctx * ctx) { uint32_t length=ctx->buflen; int i = 127 - length; uint32_t *x = (uint32_t *) ctx->buffer; uint8_t *d = &ctx->buffer[length]; *d++ = 0x80; while(++length%4!=0) { *d++=0; i--; } x=(uint32_t*)d; while(i>0) { i-=4; *x++=0; } } __device__ void ctx_add_length(sha512_ctx * ctx) { uint64_t *blocks = (uint64_t *) ctx->buffer; blocks[15] = SWAP64((uint64_t) ctx->total * 8); } __device__ void finish_ctx(sha512_ctx * ctx) { ctx_append_1(ctx); ctx_add_length(ctx); ctx->buflen = 0; } __device__ void ctx_update(sha512_ctx * ctx, const char *string, uint8_t len) { ctx->total += len; uint8_t startpos = ctx->buflen; uint8_t partsize; if (startpos + len <= 128) { partsize = len; } else partsize = 128 - startpos; insert_to_buffer(ctx, (const uint8_t *) string, partsize); if (ctx->buflen == 128) { uint8_t offset = 128 - startpos; sha512_block(ctx); ctx->buflen = 0; insert_to_buffer(ctx, (const uint8_t *) (string + offset), len - offset); } } __device__ void clear_ctx_buffer(sha512_ctx * ctx) { uint32_t *w = (uint32_t *) ctx->buffer; //#pragma unroll 30 for (int i = 0; i < 30; i++) w[i] = 0; ctx->buflen = 0; } __device__ void sha512_digest(sha512_ctx * ctx, uint64_t * result) { uint8_t i; if (ctx->buflen <= 111) { //data+0x80+datasize fits in one 1024bit block finish_ctx(ctx); sha512_block(ctx); } else { uint8_t moved = 1; if (ctx->buflen < 128) { //data and 0x80 fits in one block ctx_append_1(ctx); moved = 0; } sha512_block(ctx); clear_ctx_buffer(ctx); if (moved) ctx->buffer[0] = 0x80; //append 1,the rest is already clean ctx_add_length(ctx); sha512_block(ctx); } //#pragma unroll 8 for (i = 0; i < 8; i++) result[i] = SWAP64(ctx->H[i]); } __device__ void sha512crypt(const char *pass, uint8_t passlength, uint64_t * tresult, uint32_t idx, uint32_t rounds) { uint64_t alt_result[8], temp_result[8]; int i; sha512_ctx ctx; init_ctx(&ctx); ctx_update(&ctx, pass, passlength); ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen); ctx_update(&ctx, pass, passlength); sha512_digest(&ctx, alt_result); init_ctx(&ctx); ctx_update(&ctx, pass, passlength); ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen); ctx_update(&ctx, (const char *) alt_result, passlength); for (i = passlength; i > 0; i >>= 1) { if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result, 64); else ctx_update(&ctx, pass, passlength); } sha512_digest(&ctx, alt_result); init_ctx(&ctx); for (i = 0; i < passlength; i++) ctx_update(&ctx, pass, passlength); sha512_digest(&ctx, temp_result); __shared__ char sp_sequence[THREADS][16+4]; char *p_sequence=sp_sequence[threadIdx.x]; memcpy(p_sequence, temp_result, passlength); init_ctx(&ctx); for (i = 0; i < 16 + ((unsigned char *) alt_result)[0]; i++) ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen); sha512_digest(&ctx, temp_result); uint8_t saltlength = cuda_salt[0].saltlen; __shared__ char ss_sequence[THREADS][16+4]; char *s_sequence=ss_sequence[threadIdx.x]; memcpy(s_sequence, temp_result, saltlength); for (i = 0; i < rounds; i++) { init_ctx(&ctx); if ((i & 1) != 0) ctx_update(&ctx, p_sequence, passlength); else ctx_update(&ctx, (const char *) alt_result, 64); if ((i % 3) != 0) 
ctx_update(&ctx, s_sequence, saltlength); if ((i % 7) != 0) ctx_update(&ctx, p_sequence, passlength); if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result, 64); else ctx_update(&ctx, p_sequence, passlength); sha512_digest(&ctx, alt_result); } //#pragma unroll 8 for (i = 0; i < 8; i++) tresult[i] = alt_result[i]; } __global__ void kernel_crypt_r(crypt_sha512_password * inbuffer, crypt_sha512_hash * outbuffer) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; sha512crypt((const char *) inbuffer[idx].v, inbuffer[idx].length, outbuffer[idx].v, idx, cuda_salt[0].rounds); } void sha512_crypt_gpu(crypt_sha512_password * inbuffer, crypt_sha512_hash * outbuffer, crypt_sha512_salt * host_salt) { crypt_sha512_password *cuda_inbuffer; crypt_sha512_hash *cuda_outbuffer; size_t insize = sizeof(crypt_sha512_password) * KEYS_PER_CRYPT; size_t outsize = sizeof(crypt_sha512_hash) * KEYS_PER_CRYPT; HANDLE_ERROR(hipMalloc(&cuda_inbuffer, insize)); HANDLE_ERROR(hipMalloc(&cuda_outbuffer, outsize)); HANDLE_ERROR(hipMemcpy(cuda_inbuffer, inbuffer, insize, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, host_salt, sizeof(crypt_sha512_salt))); dim3 dimGrid(BLOCKS); dim3 dimBlock(THREADS); hipLaunchKernelGGL(( kernel_crypt_r) , dim3(dimGrid), dim3(dimBlock) , 0, 0, cuda_inbuffer, cuda_outbuffer); hipDeviceSynchronize(); HANDLE_ERROR(hipMemcpy(outbuffer, cuda_outbuffer, outsize, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipFree(cuda_inbuffer)); HANDLE_ERROR(hipFree(cuda_outbuffer)); }
b4df84ebbb3c2ea62a86b77bac2f410fbf8cabc3.cu
/* * This software is Copyright (c) 2011 Lukas Odzioba <lukas dot odzioba at gmail dot com> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. */ #include <stdio.h> #include "../cuda_cryptsha512.h" #include "cuda_common.cuh" __constant__ uint64_t k[] = { 0x428a2f98d728ae22LL, 0x7137449123ef65cdLL, 0xb5c0fbcfec4d3b2fLL, 0xe9b5dba58189dbbcLL, 0x3956c25bf348b538LL, 0x59f111f1b605d019LL, 0x923f82a4af194f9bLL, 0xab1c5ed5da6d8118LL, 0xd807aa98a3030242LL, 0x12835b0145706fbeLL, 0x243185be4ee4b28cLL, 0x550c7dc3d5ffb4e2LL, 0x72be5d74f27b896fLL, 0x80deb1fe3b1696b1LL, 0x9bdc06a725c71235LL, 0xc19bf174cf692694LL, 0xe49b69c19ef14ad2LL, 0xefbe4786384f25e3LL, 0x0fc19dc68b8cd5b5LL, 0x240ca1cc77ac9c65LL, 0x2de92c6f592b0275LL, 0x4a7484aa6ea6e483LL, 0x5cb0a9dcbd41fbd4LL, 0x76f988da831153b5LL, 0x983e5152ee66dfabLL, 0xa831c66d2db43210LL, 0xb00327c898fb213fLL, 0xbf597fc7beef0ee4LL, 0xc6e00bf33da88fc2LL, 0xd5a79147930aa725LL, 0x06ca6351e003826fLL, 0x142929670a0e6e70LL, 0x27b70a8546d22ffcLL, 0x2e1b21385c26c926LL, 0x4d2c6dfc5ac42aedLL, 0x53380d139d95b3dfLL, 0x650a73548baf63deLL, 0x766a0abb3c77b2a8LL, 0x81c2c92e47edaee6LL, 0x92722c851482353bLL, 0xa2bfe8a14cf10364LL, 0xa81a664bbc423001LL, 0xc24b8b70d0f89791LL, 0xc76c51a30654be30LL, 0xd192e819d6ef5218LL, 0xd69906245565a910LL, 0xf40e35855771202aLL, 0x106aa07032bbd1b8LL, 0x19a4c116b8d2d0c8LL, 0x1e376c085141ab53LL, 0x2748774cdf8eeb99LL, 0x34b0bcb5e19b48a8LL, 0x391c0cb3c5c95a63LL, 0x4ed8aa4ae3418acbLL, 0x5b9cca4f7763e373LL, 0x682e6ff3d6b2b8a3LL, 0x748f82ee5defb2fcLL, 0x78a5636f43172f60LL, 0x84c87814a1f0ab72LL, 0x8cc702081a6439ecLL, 0x90befffa23631e28LL, 0xa4506cebde82bde9LL, 0xbef9a3f7b2c67915LL, 0xc67178f2e372532bLL, 0xca273eceea26619cLL, 0xd186b8c721c0c207LL, 0xeada7dd6cde0eb1eLL, 0xf57d4f7fee6ed178LL, 0x06f067aa72176fbaLL, 0x0a637dc5a2c898a6LL, 0x113f9804bef90daeLL, 0x1b710b35131c471bLL, 0x28db77f523047d84LL, 0x32caab7b40c72493LL, 0x3c9ebe0a15c9bebcLL, 0x431d67c49c100d4cLL, 0x4cc5d4becb3e42b6LL, 0x597f299cfc657e2aLL, 0x5fcb6fab3ad6faecLL, 0x6c44198c4a475817LL, }; __constant__ crypt_sha512_salt cuda_salt[1]; extern "C" void sha512_crypt_gpu(crypt_sha512_password * inbuffer, crypt_sha512_hash * outbuffer, crypt_sha512_salt * salt); __device__ void init_ctx(sha512_ctx * ctx) { ctx->H[0] = 0x6a09e667f3bcc908LL; ctx->H[1] = 0xbb67ae8584caa73bLL; ctx->H[2] = 0x3c6ef372fe94f82bLL; ctx->H[3] = 0xa54ff53a5f1d36f1LL; ctx->H[4] = 0x510e527fade682d1LL; ctx->H[5] = 0x9b05688c2b3e6c1fLL; ctx->H[6] = 0x1f83d9abfb41bd6bLL; ctx->H[7] = 0x5be0cd19137e2179LL; ctx->total = 0; ctx->buflen = 0; } __device__ void insert_to_buffer(sha512_ctx * ctx, const uint8_t * string, uint8_t len) { uint8_t *d = &ctx->buffer[ctx->buflen]; memcpy(d,string,len); ctx->buflen += len; } __device__ void sha512_block(sha512_ctx * ctx) { int i; uint64_t a = ctx->H[0]; uint64_t b = ctx->H[1]; uint64_t c = ctx->H[2]; uint64_t d = ctx->H[3]; uint64_t e = ctx->H[4]; uint64_t f = ctx->H[5]; uint64_t g = ctx->H[6]; uint64_t h = ctx->H[7]; uint64_t w[16]; uint64_t *data = (uint64_t *) ctx->buffer; //#pragma unroll 16 for (i = 0; i < 16; i++) w[i] = SWAP64(data[i]); uint64_t t1, t2; //#pragma unroll 16 for (i = 0; i < 16; i++) { t1 = k[i] + w[i] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } for (i = 16; i < 80; i++) { w[i & 15] =sigma1(w[(i - 2) & 15]) + sigma0(w[(i - 15) & 15]) + w[(i -16) & 15] + w[(i - 7) & 15]; 
t1 = k[i] + w[i & 15] + h + Sigma1(e) + Ch(e, f, g); t2 = Maj(a, b, c) + Sigma0(a); h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2; } ctx->H[0] += a; ctx->H[1] += b; ctx->H[2] += c; ctx->H[3] += d; ctx->H[4] += e; ctx->H[5] += f; ctx->H[6] += g; ctx->H[7] += h; } __device__ void ctx_append_1(sha512_ctx * ctx) { uint32_t length=ctx->buflen; int i = 127 - length; uint32_t *x = (uint32_t *) ctx->buffer; uint8_t *d = &ctx->buffer[length]; *d++ = 0x80; while(++length%4!=0) { *d++=0; i--; } x=(uint32_t*)d; while(i>0) { i-=4; *x++=0; } } __device__ void ctx_add_length(sha512_ctx * ctx) { uint64_t *blocks = (uint64_t *) ctx->buffer; blocks[15] = SWAP64((uint64_t) ctx->total * 8); } __device__ void finish_ctx(sha512_ctx * ctx) { ctx_append_1(ctx); ctx_add_length(ctx); ctx->buflen = 0; } __device__ void ctx_update(sha512_ctx * ctx, const char *string, uint8_t len) { ctx->total += len; uint8_t startpos = ctx->buflen; uint8_t partsize; if (startpos + len <= 128) { partsize = len; } else partsize = 128 - startpos; insert_to_buffer(ctx, (const uint8_t *) string, partsize); if (ctx->buflen == 128) { uint8_t offset = 128 - startpos; sha512_block(ctx); ctx->buflen = 0; insert_to_buffer(ctx, (const uint8_t *) (string + offset), len - offset); } } __device__ void clear_ctx_buffer(sha512_ctx * ctx) { uint32_t *w = (uint32_t *) ctx->buffer; //#pragma unroll 30 for (int i = 0; i < 30; i++) w[i] = 0; ctx->buflen = 0; } __device__ void sha512_digest(sha512_ctx * ctx, uint64_t * result) { uint8_t i; if (ctx->buflen <= 111) { //data+0x80+datasize fits in one 1024bit block finish_ctx(ctx); sha512_block(ctx); } else { uint8_t moved = 1; if (ctx->buflen < 128) { //data and 0x80 fits in one block ctx_append_1(ctx); moved = 0; } sha512_block(ctx); clear_ctx_buffer(ctx); if (moved) ctx->buffer[0] = 0x80; //append 1,the rest is already clean ctx_add_length(ctx); sha512_block(ctx); } //#pragma unroll 8 for (i = 0; i < 8; i++) result[i] = SWAP64(ctx->H[i]); } __device__ void sha512crypt(const char *pass, uint8_t passlength, uint64_t * tresult, uint32_t idx, uint32_t rounds) { uint64_t alt_result[8], temp_result[8]; int i; sha512_ctx ctx; init_ctx(&ctx); ctx_update(&ctx, pass, passlength); ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen); ctx_update(&ctx, pass, passlength); sha512_digest(&ctx, alt_result); init_ctx(&ctx); ctx_update(&ctx, pass, passlength); ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen); ctx_update(&ctx, (const char *) alt_result, passlength); for (i = passlength; i > 0; i >>= 1) { if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result, 64); else ctx_update(&ctx, pass, passlength); } sha512_digest(&ctx, alt_result); init_ctx(&ctx); for (i = 0; i < passlength; i++) ctx_update(&ctx, pass, passlength); sha512_digest(&ctx, temp_result); __shared__ char sp_sequence[THREADS][16+4]; char *p_sequence=sp_sequence[threadIdx.x]; memcpy(p_sequence, temp_result, passlength); init_ctx(&ctx); for (i = 0; i < 16 + ((unsigned char *) alt_result)[0]; i++) ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen); sha512_digest(&ctx, temp_result); uint8_t saltlength = cuda_salt[0].saltlen; __shared__ char ss_sequence[THREADS][16+4]; char *s_sequence=ss_sequence[threadIdx.x]; memcpy(s_sequence, temp_result, saltlength); for (i = 0; i < rounds; i++) { init_ctx(&ctx); if ((i & 1) != 0) ctx_update(&ctx, p_sequence, passlength); else ctx_update(&ctx, (const char *) alt_result, 64); if ((i % 3) != 0) ctx_update(&ctx, s_sequence, saltlength); if ((i % 7) != 0) ctx_update(&ctx, p_sequence, 
passlength); if ((i & 1) != 0) ctx_update(&ctx, (const char *) alt_result, 64); else ctx_update(&ctx, p_sequence, passlength); sha512_digest(&ctx, alt_result); } //#pragma unroll 8 for (i = 0; i < 8; i++) tresult[i] = alt_result[i]; } __global__ void kernel_crypt_r(crypt_sha512_password * inbuffer, crypt_sha512_hash * outbuffer) { uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x; sha512crypt((const char *) inbuffer[idx].v, inbuffer[idx].length, outbuffer[idx].v, idx, cuda_salt[0].rounds); } void sha512_crypt_gpu(crypt_sha512_password * inbuffer, crypt_sha512_hash * outbuffer, crypt_sha512_salt * host_salt) { crypt_sha512_password *cuda_inbuffer; crypt_sha512_hash *cuda_outbuffer; size_t insize = sizeof(crypt_sha512_password) * KEYS_PER_CRYPT; size_t outsize = sizeof(crypt_sha512_hash) * KEYS_PER_CRYPT; HANDLE_ERROR(cudaMalloc(&cuda_inbuffer, insize)); HANDLE_ERROR(cudaMalloc(&cuda_outbuffer, outsize)); HANDLE_ERROR(cudaMemcpy(cuda_inbuffer, inbuffer, insize, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, host_salt, sizeof(crypt_sha512_salt))); dim3 dimGrid(BLOCKS); dim3 dimBlock(THREADS); kernel_crypt_r <<< dimGrid, dimBlock >>> (cuda_inbuffer, cuda_outbuffer); cudaThreadSynchronize(); HANDLE_ERROR(cudaMemcpy(outbuffer, cuda_outbuffer, outsize, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaFree(cuda_inbuffer)); HANDLE_ERROR(cudaFree(cuda_outbuffer)); }
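sha512_crypt_gpu wraps every cudaMalloc/cudaMemcpy in HANDLE_ERROR but leaves the kernel launch itself unchecked. A minimal sketch (editor's assumption, written against the names in this file) of the usual launch check:

// Editor's sketch: launch-error checking around the existing call.
kernel_crypt_r <<< dimGrid, dimBlock >>> (cuda_inbuffer, cuda_outbuffer);
cudaError_t launchErr = cudaGetLastError();     // configuration/launch errors
if (launchErr != cudaSuccess)
    printf("kernel launch failed: %s\n", cudaGetErrorString(launchErr));
cudaError_t execErr = cudaDeviceSynchronize();  // asynchronous execution errors
if (execErr != cudaSuccess)
    printf("kernel execution failed: %s\n", cudaGetErrorString(execErr));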
a42527490ec30a72e004eb2d0f1c19452054a955.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pairwise_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void FocalParameter(const int nthreads, const Dtype threshold, const Dtype* similarity, const Dtype* dot_product, Dtype* focal) { CUDA_KERNEL_LOOP(index, nthreads) { if(dot_product[index] > threshold){ if (similarity[index] == 0){ focal[index] = 1; } else{ focal[index] = 0; } } else if (dot_product[index] < -threshold){ if (similarity[index] == 0){ focal[index] = 0; } else{ focal[index] = 1; } } else{ /* if (similarity[index] == 0){ focal[index] = pow(1. / (1+exp(-dot_product[index])), 0.1); } else{ focal[index] = pow(1. - (1. / (1+exp(-dot_product[index]))), 0.1); } Dtype beta = Dtype(1); if (similarity[index] == 0){ focal[index] = pow(beta / (beta + dot_product[index]), 0.1); } else{ focal[index] = pow(dot_product[index] / (beta + dot_product[index]), 0.1); }*/ if (similarity[index] == 0){ focal[index] = pow((1. + dot_product[index]) / 2., 0.1); } else{ focal[index] = pow((1. - dot_product[index]) / 2., 0.1); } } } } template <typename Dtype> __global__ void EuclideanDistance(const int nthreads, const int outer_num, const int inner_num, const Dtype* code1, const Dtype* code2, Dtype* distance){ CUDA_KERNEL_LOOP(index, nthreads) { int index_id1 = index / outer_num; int index_id2 = index % outer_num; distance[index] = 0; // stride each row by inner_num (the code length), as CosineDistance does below for (int i = 0; i < inner_num; i++){ distance[index] += (code1[index_id1 * inner_num + i] - code2[index_id2 * inner_num + i]) * (code1[index_id1 * inner_num + i] - code2[index_id2 * inner_num + i]); } } } template <typename Dtype> __global__ void CosineDistance(const int nthreads, const int outer_num, const int inner_num, const Dtype* code1, const Dtype* code2, Dtype* distance){ CUDA_KERNEL_LOOP(index, nthreads) { int index_id1 = index / outer_num; int index_id2 = index % outer_num; distance[index] = 0; Dtype length1 = 0; Dtype length2 = 0; for (int i = 0; i < inner_num; i++){ length1 += code1[index_id1 * inner_num + i] * code1[index_id1 * inner_num + i]; length2 += code2[index_id2 * inner_num + i] * code2[index_id2 * inner_num + i]; } length1 = sqrt(length1); length2 = sqrt(length2); for (int i = 0; i < inner_num; i++){ distance[index] += (code1[index_id1 * inner_num + i] * code2[index_id2 * inner_num + i]); } distance[index] = distance[index] / (length1 * length2); if(distance[index] >= 1){ distance[index] = 0.99; } else if(distance[index] <= -1){ distance[index] = -0.99; } } } template <typename Dtype> __global__ void DegreeRowColumn(const int outer_num, const Dtype* similarity, Dtype* row, Dtype* column, Dtype* neg_row, Dtype* neg_column){ CUDA_KERNEL_LOOP(index, outer_num) { row[index] = 0; column[index] = 0; neg_row[index] = 0; neg_column[index] = 0; for (int i = 0; i < outer_num; i++){ row[index] += similarity[index*outer_num+i]; column[index] += similarity[i*outer_num + index]; neg_row[index] += 1-similarity[index*outer_num+i]; neg_column[index] += 1-similarity[i*outer_num + index]; } if (row[index] == 0){ row[index] = 1.; } else{ row[index] = Dtype(outer_num) / row[index]; } if (column[index] == 0){ column[index] = 1.; } else { column[index] = Dtype(outer_num) / column[index]; } if (neg_row[index] == 0){ neg_row[index] = 1.; } else { neg_row[index] = Dtype(outer_num) / neg_row[index]; } if (neg_column[index] == 0){ neg_column[index] = 1.; } else { neg_column[index] = Dtype(outer_num) / neg_column[index]; } } } template 
<typename Dtype> __global__ void SimilarityProcess(const int nthreads, Dtype* similarity, Dtype label_dim) { CUDA_KERNEL_LOOP(index, nthreads) { if((similarity[index] < 0) || (similarity[index] >= label_dim)){ //unknown label similarity[index] = Dtype(-1.0); } else if(similarity[index] > 0){ //similar label similarity[index] = Dtype(1.0); } } } template <typename Dtype> __global__ void ContinousSimilarityProcess(const int nthreads, const Dtype* similarity, const Dtype* similarity1, Dtype* similarity2, Dtype* sim, const int outer_num) { CUDA_KERNEL_LOOP(index, nthreads) { int data_id1 = index / outer_num; int data_id2 = index % outer_num; sim[index] = similarity[index] * similarity[index] / (similarity1[outer_num*data_id1+data_id1] * similarity2[outer_num*data_id2+data_id2]); if(sim[index] == 0){ sim[index] = 0.25; } } } template <typename Dtype> __global__ void RemoveZero(const int nthreads, Dtype* similarity1, Dtype* similarity2) { CUDA_KERNEL_LOOP(index, nthreads) { if(similarity1[index] == 0){ similarity1[index] = 1.0; } if(similarity2[index] == 0){ similarity2[index] = 1.0; } } } template <typename Dtype> __global__ void PairwiseLossForwardGPU(const int nthreads, const int num, const Dtype* similarity, const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data) { CUDA_KERNEL_LOOP(index, nthreads) { if(similarity[index] >= 0){ count[index] = Dtype(1.0); if((threshold >= 0) && (product[index] >= threshold)){ loss_data[index] = product[index] * (1 - (similarity[index] > 0)); } else{ loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index]; } if(similarity[index] > 0){ loss_data[index] = loss_data[index]; } } else{ count[index] = Dtype(0.0); loss_data[index] = Dtype(0.0); } } } template <typename Dtype> void PairwiseLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* similarity = loss_.mutable_gpu_data(); Dtype* dot_product = product_.mutable_gpu_data(); Dtype* exp_product = product_.mutable_gpu_diff(); Dtype* loss_data = loss_.mutable_gpu_diff(); Dtype* count = count_.mutable_gpu_data(); Dtype* similarity1 = own_similarity_.mutable_gpu_data(); Dtype* similarity2 = own_similarity_.mutable_gpu_diff(); Dtype* distance = distance_.mutable_gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_data1 = bottom[2]->gpu_data(); Dtype* label = bottom[1]->mutable_gpu_data(); Dtype* label1 = bottom[3]->mutable_gpu_data(); Dtype* row_weight = weight_vector_.mutable_gpu_data(); Dtype* column_weight = weight_vector_.mutable_gpu_diff(); Dtype* neg_row_weight = neg_weight_vector_.mutable_gpu_data(); Dtype* neg_column_weight = neg_weight_vector_.mutable_gpu_diff(); int nthreads = outer_num_ * outer_num_; Dtype loss, count_num; caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, Dtype(1.0), label, label1, Dtype(0.0), similarity); if (continous_similarity_){ caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, Dtype(1.0), label, label, Dtype(0.0), similarity1); caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, Dtype(1.0), label1, label1, Dtype(0.0), similarity2); hipLaunchKernelGGL(( RemoveZero<Dtype>), dim3(CAFFE_GET_BLOCKS(own_similarity_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, own_similarity_.count(), similarity1, similarity2); hipLaunchKernelGGL(( ContinousSimilarityProcess<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, 
similarity, similarity1, similarity2, loss_data, outer_num_); caffe_gpu_memcpy(nthreads*sizeof(Dtype), loss_data, similarity1); } hipLaunchKernelGGL(( SimilarityProcess<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, similarity, label_dim_); hipLaunchKernelGGL(( DegreeRowColumn<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, similarity, row_weight, column_weight, neg_row_weight, neg_column_weight); caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, inner_num_, Dtype(1.0), bottom_data, bottom_data1, Dtype(0.0), dot_product); caffe_gpu_scal(outer_num_ * outer_num_, sigmoid_param_, dot_product); //calculate priority parameter start Dtype* focal_parameter = focal_.mutable_gpu_data(); hipLaunchKernelGGL(( CosineDistance<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, inner_num_, bottom_data, bottom_data1, distance); hipLaunchKernelGGL(( FocalParameter<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, 20., similarity, distance, focal_parameter); Dtype all_focal; caffe_gpu_asum(nthreads, focal_parameter, &all_focal); ave_focal_ = all_focal / Dtype(nthreads); //calculate priority parameter end caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product); hipLaunchKernelGGL(( PairwiseLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity, exp_product, dot_product, l_threshold_, count, loss_data); //add priority start caffe_gpu_mul(nthreads, loss_data, focal_parameter, loss_data); caffe_gpu_scal(nthreads, Dtype(1. / ave_focal_), loss_data); //add priority end caffe_gpu_asum(nthreads, loss_data, &loss); caffe_gpu_asum(nthreads, count, &count_num); loss /= (count_num > 0 ? 
count_num : Dtype(1)); LOG(INFO) << "L loss:" << loss; loss = loss * (l_lambda_ > 0); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void PairwiseLossBackwardGPU(const int nthreads, const int num, const Dtype* similarity, const Dtype* exp_product, Dtype* count, Dtype* diff, const Dtype* row_weight, const Dtype* column_weight, const Dtype* neg_row_weight, const Dtype* neg_column_weight) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index / num; int j = index % num; if(similarity[index] >= 0){ diff[index] = 2 * ( 1 / (1 + 1 / exp_product[index]) - (similarity[index] > 0) ); count[index] = Dtype(1.0); if(similarity[index] > 0){ diff[index] = diff[index] * sqrt(row_weight[i] * column_weight[j]); count[index] *= sqrt(row_weight[i] * column_weight[j]); } //else{ // diff[index] = diff[index] * sqrt(neg_row_weight[i] * neg_column_weight[j]); // count[index] *= sqrt(neg_row_weight[i] * neg_column_weight[j]); //} } else{ diff[index] = Dtype(0.0); count[index] = Dtype(0.0); } } } template <typename Dtype> void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* diff = count_.mutable_gpu_diff(); Dtype* count = count_.mutable_gpu_data(); const Dtype* similarity = loss_.gpu_data(); const Dtype* exp_product = product_.gpu_diff(); const Dtype* similarity1 = own_similarity_.gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom_diff1 = bottom[2]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_data1 = bottom[2]->gpu_data(); const Dtype* row_weight = weight_vector_.gpu_data(); const Dtype* column_weight = weight_vector_.gpu_diff(); const Dtype* neg_row_weight = neg_weight_vector_.gpu_data(); const Dtype* neg_column_weight = neg_weight_vector_.gpu_diff(); int nthreads = outer_num_ * outer_num_; //calculate diff hipLaunchKernelGGL(( PairwiseLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity, exp_product, count, diff, row_weight, column_weight, neg_row_weight, neg_column_weight); // focal loss const Dtype* focal_parameter = focal_.gpu_data(); caffe_gpu_mul(nthreads, diff, focal_parameter, diff); caffe_gpu_scal(nthreads, Dtype(1. / ave_focal_), diff); // if(continous_similarity_){ caffe_gpu_mul(nthreads, diff, similarity1, diff); caffe_gpu_scal(nthreads, Dtype(4), diff); } //copy to bottom_diff Dtype count_num; caffe_gpu_asum(nthreads, count, &count_num); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_, l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data1, Dtype(0.0), bottom_diff); caffe_gpu_gemm(CblasTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_, l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data, Dtype(0.0), bottom_diff1); //scale the full outer_num_ x inner_num_ gradients by sigmoid_param_ caffe_gpu_scal(outer_num_ * inner_num_, sigmoid_param_, bottom_diff); caffe_gpu_scal(outer_num_ * inner_num_, sigmoid_param_, bottom_diff1); } } INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer); } // namespace caffe
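Note on the forward pass above: for each counted pair the kernel evaluates a logistic pairwise loss on the scaled inner product, switching to a linear branch once the product reaches l_threshold_ so that exp() cannot overflow. A minimal host-side sketch of that per-pair term (plain C++; pairwise_loss_term is an illustrative name, not part of the layer):

#include <cmath>

// Per-pair forward term mirroring PairwiseLossForwardGPU above.
// 'product' is assumed to already be sigmoid_param_ * <code_i, code_j>,
// 'sim' the processed similarity (1 similar, 0 dissimilar, -1 unknown),
// and 'threshold' the l_threshold_ cutoff (a negative value disables it).
double pairwise_loss_term(double product, double sim, double threshold) {
    if (sim < 0) return 0.0;                      // unknown pair: not counted
    const double s = (sim > 0) ? 1.0 : 0.0;
    if (threshold >= 0 && product >= threshold)   // linearized branch:
        return product * (1.0 - s);               // log(1 + e^p) ~= p for large p
    return std::log(1.0 + std::exp(product)) - s * product;
}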
a42527490ec30a72e004eb2d0f1c19452054a955.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pairwise_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void FocalParameter(const int nthreads, const Dtype threshold, const Dtype* similarity, const Dtype* dot_product, Dtype* focal) { CUDA_KERNEL_LOOP(index, nthreads) { if(dot_product[index] > threshold){ if (similarity[index] == 0){ focal[index] = 1; } else{ focal[index] = 0; } } else if (dot_product[index] < -threshold){ if (similarity[index] == 0){ focal[index] = 0; } else{ focal[index] = 1; } } else{ /* if (similarity[index] == 0){ focal[index] = pow(1. / (1+exp(-dot_product[index])), 0.1); } else{ focal[index] = pow(1. - (1. / (1+exp(-dot_product[index]))), 0.1); } Dtype beta = Dtype(1); if (similarity[index] == 0){ focal[index] = pow(beta / (beta + dot_product[index]), 0.1); } else{ focal[index] = pow(dot_product[index] / (beta + dot_product[index]), 0.1); } */ if (similarity[index] == 0){ focal[index] = pow((1. + dot_product[index]) / 2., 0.1); } else{ focal[index] = pow((1. - dot_product[index]) / 2., 0.1); } } } } template <typename Dtype> __global__ void EuclideanDistance(const int nthreads, const int outer_num, const int inner_num, const Dtype* code1, const Dtype* code2, Dtype* distance){ CUDA_KERNEL_LOOP(index, nthreads) { int index_id1 = index / outer_num; int index_id2 = index % outer_num; distance[index] = 0; for (int i = 0; i < inner_num; i++){ distance[index] += (code1[index_id1 * inner_num + i] - code2[index_id2 * inner_num + i]) * (code1[index_id1 * inner_num + i] - code2[index_id2 * inner_num + i]); } } } template <typename Dtype> __global__ void CosineDistance(const int nthreads, const int outer_num, const int inner_num, const Dtype* code1, const Dtype* code2, Dtype* distance){ CUDA_KERNEL_LOOP(index, nthreads) { int index_id1 = index / outer_num; int index_id2 = index % outer_num; distance[index] = 0; Dtype length1 = 0; Dtype length2 = 0; for (int i = 0; i < inner_num; i++){ length1 += code1[index_id1 * inner_num + i] * code1[index_id1 * inner_num + i]; length2 += code2[index_id2 * inner_num + i] * code2[index_id2 * inner_num + i]; } length1 = sqrt(length1); length2 = sqrt(length2); for (int i = 0; i < inner_num; i++){ distance[index] += (code1[index_id1 * inner_num + i] * code2[index_id2 * inner_num + i]); } distance[index] = distance[index] / (length1 * length2); if(distance[index] >= 1){ distance[index] = 0.99; } else if(distance[index] <= -1){ distance[index] = -0.99; } } } template <typename Dtype> __global__ void DegreeRowColumn(const int outer_num, const Dtype* similarity, Dtype* row, Dtype* column, Dtype* neg_row, Dtype* neg_column){ CUDA_KERNEL_LOOP(index, outer_num) { row[index] = 0; column[index] = 0; neg_row[index] = 0; neg_column[index] = 0; for (int i = 0; i < outer_num; i++){ row[index] += similarity[index*outer_num+i]; column[index] += similarity[i*outer_num + index]; neg_row[index] += 1-similarity[index*outer_num+i]; neg_column[index] += 1-similarity[i*outer_num + index]; } if (row[index] == 0){ row[index] = 1.; } else{ row[index] = Dtype(outer_num) / row[index]; } if (column[index] == 0){ column[index] = 1.; } else { column[index] = Dtype(outer_num) / column[index]; } if (neg_row[index] == 0){ neg_row[index] = 1.; } else { neg_row[index] = Dtype(outer_num) / neg_row[index]; } if (neg_column[index] == 0){ neg_column[index] = 1.; } else { neg_column[index] = Dtype(outer_num) / neg_column[index]; } } } template <typename Dtype> __global__ void SimilarityProcess(const int nthreads, Dtype* similarity, Dtype
label_dim) { CUDA_KERNEL_LOOP(index, nthreads) { if((similarity[index] < 0) || (similarity[index] >= label_dim)){ //unknown label similarity[index] = Dtype(-1.0); } else if(similarity[index] > 0){ //similar label similarity[index] = Dtype(1.0); } } } template <typename Dtype> __global__ void ContinousSimilarityProcess(const int nthreads, const Dtype* similarity, const Dtype* similarity1, Dtype* similarity2, Dtype* sim, const int outer_num) { CUDA_KERNEL_LOOP(index, nthreads) { int data_id1 = index / outer_num; int data_id2 = index % outer_num; sim[index] = similarity[index] * similarity[index] / (similarity1[outer_num*data_id1+data_id1] * similarity2[outer_num*data_id2+data_id2]); if(sim[index] == 0){ sim[index] = 0.25; } } } template <typename Dtype> __global__ void RemoveZero(const int nthreads, Dtype* similarity1, Dtype* similarity2) { CUDA_KERNEL_LOOP(index, nthreads) { if(similarity1[index] == 0){ similarity1[index] = 1.0; } if(similarity2[index] == 0){ similarity2[index] = 1.0; } } } template <typename Dtype> __global__ void PairwiseLossForwardGPU(const int nthreads, const int num, const Dtype* similarity, const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data) { CUDA_KERNEL_LOOP(index, nthreads) { if(similarity[index] >= 0){ count[index] = Dtype(1.0); if((threshold >= 0) && (product[index] >= threshold)){ loss_data[index] = product[index] * (1 - (similarity[index] > 0)); } else{ loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index]; } if(similarity[index] > 0){ loss_data[index] = loss_data[index]; } } else{ count[index] = Dtype(0.0); loss_data[index] = Dtype(0.0); } } } template <typename Dtype> void PairwiseLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* similarity = loss_.mutable_gpu_data(); Dtype* dot_product = product_.mutable_gpu_data(); Dtype* exp_product = product_.mutable_gpu_diff(); Dtype* loss_data = loss_.mutable_gpu_diff(); Dtype* count = count_.mutable_gpu_data(); Dtype* similarity1 = own_similarity_.mutable_gpu_data(); Dtype* similarity2 = own_similarity_.mutable_gpu_diff(); Dtype* distance = distance_.mutable_gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_data1 = bottom[2]->gpu_data(); Dtype* label = bottom[1]->mutable_gpu_data(); Dtype* label1 = bottom[3]->mutable_gpu_data(); Dtype* row_weight = weight_vector_.mutable_gpu_data(); Dtype* column_weight = weight_vector_.mutable_gpu_diff(); Dtype* neg_row_weight = neg_weight_vector_.mutable_gpu_data(); Dtype* neg_column_weight = neg_weight_vector_.mutable_gpu_diff(); int nthreads = outer_num_ * outer_num_; Dtype loss, count_num; caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, Dtype(1.0), label, label1, Dtype(0.0), similarity); if (continous_similarity_){ caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, Dtype(1.0), label, label, Dtype(0.0), similarity1); caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_, Dtype(1.0), label1, label1, Dtype(0.0), similarity2); RemoveZero<Dtype><<<CAFFE_GET_BLOCKS(own_similarity_.count()), CAFFE_CUDA_NUM_THREADS>>>(own_similarity_.count(), similarity1, similarity2); ContinousSimilarityProcess<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, similarity, similarity1, similarity2, loss_data, outer_num_); caffe_gpu_memcpy(nthreads*sizeof(Dtype), loss_data, similarity1); } 
SimilarityProcess<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, similarity, label_dim_); DegreeRowColumn<Dtype><<<CAFFE_GET_BLOCKS(outer_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, similarity, row_weight, column_weight, neg_row_weight, neg_column_weight); caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, inner_num_, Dtype(1.0), bottom_data, bottom_data1, Dtype(0.0), dot_product); caffe_gpu_scal(outer_num_ * outer_num_, sigmoid_param_, dot_product); //calculate priority parameter start Dtype* focal_parameter = focal_.mutable_gpu_data(); CosineDistance<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, inner_num_, bottom_data, bottom_data1, distance); FocalParameter<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, 20., similarity, distance, focal_parameter); Dtype all_focal; caffe_gpu_asum(nthreads, focal_parameter, &all_focal); ave_focal_ = all_focal / Dtype(nthreads); //calculate priority parameter end caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product); PairwiseLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity, exp_product, dot_product, l_threshold_, count, loss_data); //add priority start caffe_gpu_mul(nthreads, loss_data, focal_parameter, loss_data); caffe_gpu_scal(nthreads, Dtype(1. / ave_focal_), loss_data); //add priority end caffe_gpu_asum(nthreads, loss_data, &loss); caffe_gpu_asum(nthreads, count, &count_num); loss /= (count_num > 0 ? count_num : Dtype(1)); LOG(INFO) << "L loss:" << loss; loss = loss * (l_lambda_ > 0); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void PairwiseLossBackwardGPU(const int nthreads, const int num, const Dtype* similarity, const Dtype* exp_product, Dtype* count, Dtype* diff, const Dtype* row_weight, const Dtype* column_weight, const Dtype* neg_row_weight, const Dtype* neg_column_weight) { CUDA_KERNEL_LOOP(index, nthreads) { int i = index / num; int j = index % num; if(similarity[index] >= 0){ diff[index] = 2 * ( 1 / (1 + 1 / exp_product[index]) - (similarity[index] > 0) ); count[index] = Dtype(1.0); if(similarity[index] > 0){ diff[index] = diff[index] * sqrt(row_weight[i] * column_weight[j]); count[index] *= sqrt(row_weight[i] * column_weight[j]); } //else{ // diff[index] = diff[index] * sqrt(neg_row_weight[i] * neg_column_weight[j]); // count[index] *= sqrt(neg_row_weight[i] * neg_column_weight[j]); //} } else{ diff[index] = Dtype(0.0); count[index] = Dtype(0.0); } } } template <typename Dtype> void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* diff = count_.mutable_gpu_diff(); Dtype* count = count_.mutable_gpu_data(); const Dtype* similarity = loss_.gpu_data(); const Dtype* exp_product = product_.gpu_diff(); const Dtype* similarity1 = own_similarity_.gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom_diff1 = bottom[2]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_data1 = bottom[2]->gpu_data(); const Dtype* row_weight = weight_vector_.gpu_data(); const Dtype* column_weight = weight_vector_.gpu_diff(); const Dtype* neg_row_weight = neg_weight_vector_.gpu_data(); const Dtype* neg_column_weight = neg_weight_vector_.gpu_diff(); int nthreads = 
outer_num_ * outer_num_; //calculate diff PairwiseLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity, exp_product, count, diff, row_weight, column_weight, neg_row_weight, neg_column_weight); // focal loss const Dtype* focal_parameter = focal_.gpu_data(); caffe_gpu_mul(nthreads, diff, focal_parameter, diff); caffe_gpu_scal(nthreads, Dtype(1. / ave_focal_), diff); // if(continous_similarity_){ caffe_gpu_mul(nthreads, diff, similarity1, diff); caffe_gpu_scal(nthreads, Dtype(4), diff); } //copy to bottom_diff Dtype count_num; caffe_gpu_asum(nthreads, count, &count_num); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_, l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data1, Dtype(0.0), bottom_diff); caffe_gpu_gemm(CblasTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_, l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data, Dtype(0.0), bottom_diff1); //scale the full outer_num_ x inner_num_ gradients by sigmoid_param_ caffe_gpu_scal(outer_num_ * inner_num_, sigmoid_param_, bottom_diff); caffe_gpu_scal(outer_num_ * inner_num_, sigmoid_param_, bottom_diff1); } } INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer); } // namespace caffe
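Note on the backward pass above: 1 / (1 + 1 / exp_product) is just the logistic sigmoid of the scaled product, so the unweighted per-pair gradient is 2 * (sigmoid(product) - s). A host-side sketch under that reading (plain C++; pairwise_loss_grad is an illustrative name, and the sqrt(row * column) degree weighting applied to similar pairs is omitted for clarity):

#include <cmath>

// Unweighted per-pair gradient w.r.t. the scaled product, mirroring
// PairwiseLossBackwardGPU above.
double pairwise_loss_grad(double product, double sim) {
    if (sim < 0) return 0.0;        // unknown pair contributes nothing
    const double sigmoid = 1.0 / (1.0 + std::exp(-product));  // == 1/(1+1/e^p)
    return 2.0 * (sigmoid - ((sim > 0) ? 1.0 : 0.0));
}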
e46abfcec0b034ed92e17a8f3253ae57969ec827.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include "fix.h" #include "device_launch_parameters.h" #include <hip/device_functions.h> #include <device_atomic_functions.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> #include <math.h> #include <cstdlib> // #define CHANNELS 3 #define REDCHANNEL 'r' #define GREENCHANNEL 'g' #define BLUECHANNEL 'b' #define GRAYSCLAEREDCHANNEL 0.21 #define GRAYSCLAEGREENCHANNEL 0.71 #define GRAYSCLAEBLUECHANNEL 0.07 #define SOBEL_RADIUS 1 #define TILE_W 16 #define BLOCK_W (TILE_W + 2*SOBEL_RADIUS) #define ANGLE 50 #define HISTOGRAMMSIZE 256 //Kernel rgb to grayscale function with streams __global__ void rgbToGrayscaleKernelStream(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int offset_rgb, int offset_gray) { int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows unsigned char r, g, b, gray; if (x < *cu_image_width && y < *cu_image_height) { int offset = (y * (*cu_image_width) + x); int grayOffset = offset + offset_gray; int rgbOffset = offset_rgb + offset * CHANNELS; b = cu_src_image[rgbOffset]; g = cu_src_image[rgbOffset + 1]; r = cu_src_image[rgbOffset + 2]; gray = 0.21 * r + 0.71 *g + 0.07 *b; cu_dest_image[grayOffset] = gray; } } __global__ void getHistogrammTiledKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm) { __shared__ unsigned int smem[HISTOGRAMMSIZE]; int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows int shared_index = threadIdx.x + threadIdx.y * blockDim.x; int stride_shared = blockDim.x * blockDim.y; int stride_x = blockDim.x * gridDim.x; int stride_y = blockDim.y * gridDim.y; //Init shared memory histogramm with 0's for (int i = shared_index; i < HISTOGRAMMSIZE; i += stride_shared) { smem[i] = 0; } __syncthreads(); //Add data to histogramm in shared memory while (x < *cu_image_width && y < *cu_image_height) { int index = y * *cu_image_width + x; atomicAdd(&(smem[cu_src_image[index]]), 1); x += stride_x; y += stride_y; } __syncthreads(); /* long test = 0; if (threadIdx.x == 0) { for (int j = 0; j < HISTOGRAMMSIZE; j++) { test += smem[j]; } printf("Smem total per block: %lu \n", test); }*/ //Add shared memory histogramm part to global memory histogramm for (int i = shared_index; i < HISTOGRAMMSIZE; i+= stride_shared) { atomicAdd(&(cu_dest_histogramm[i]), smem[i]); } __syncthreads(); } __global__ void getHistogrammKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm) { int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows int stride_x = blockDim.x * gridDim.x; int stride_y = blockDim.y * gridDim.y; while (x < *cu_image_width && y < *cu_image_height) { int index = y * *cu_image_width + x; atomicAdd(&(cu_dest_histogramm[cu_src_image[index]]), 1); x += stride_x; y += stride_y; } } __global__ void sobelFilterTexture(int *cu_image_width, int *cu_image_height, unsigned char *cu_output, hipTextureObject_t cu_texObj, float theta) { int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } }; int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < *cu_image_width - 1 && y < *cu_image_height - 1) { int sobel_gradient_y = 0, 
sobel_gradient_x = 0, sobel_magnitude = 0; for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) { for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) { //Calc normalized texture coordinates float u = (x + k) / (float)*cu_image_width; float v = (y + j) / (float)*cu_image_height; // Transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; sobel_gradient_x += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; sobel_gradient_y += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; } } //Calc Sobel magnitude and save it to the image sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2)); cu_output[y * *cu_image_width + x] = (unsigned char)sobel_magnitude; } }; //Kernel sobel function __global__ void sobelFilterKernelTiled(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image) { __shared__ unsigned char ds_Img[BLOCK_W][BLOCK_W]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } }; int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int x = bx * TILE_W + tx - SOBEL_RADIUS; //cols int y = by * TILE_W + ty - SOBEL_RADIUS; //rows //Make sure x/y are not negative if (x < 0) { x = 0; } if (y < 0) { y = 0; } //Calc index of global memory int global_index = (y * (*cu_image_width) + x); //Load Data into Shared Memory //Insert 0 if the thread is supposed to fill the filter radius border of the tile if (x >= 0 && x < *cu_image_width - 1 && y >= 0 && y < *cu_image_height - 1) { ds_Img[ty][tx] = cu_src_image[global_index]; } else { ds_Img[ty][tx] = 0; } __syncthreads(); //Calc Sobel X & Y if the thread is inside the filter area if ((tx >= SOBEL_RADIUS) && (tx <= TILE_W) && (ty >= SOBEL_RADIUS) && (ty <= TILE_W)){ int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0; for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) { for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) { sobel_gradient_x += ds_Img[ty + j][tx + k] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; sobel_gradient_y += ds_Img[ty + j][tx + k] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; } } //Calc Sobel magnitude and save it to the original image sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2)); cu_dest_image[global_index] = (unsigned char)sobel_magnitude; } } __global__ void sobelFilterKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image) { int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } }; int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows //Calc index int global_index = (y * (*cu_image_width) + x); if (x >= SOBEL_RADIUS && x < *cu_image_width - 1 && y >= SOBEL_RADIUS && y < *cu_image_height - 1) { //Calc Sobel X & Y if the thread is inside the filter area int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0; for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) { for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) { sobel_gradient_x += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; sobel_gradient_y += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] *
sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; } } //Calc Sobel magnitude and save it to the image sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2)); cu_dest_image[global_index] = (unsigned char)sobel_magnitude; } else { if (x < *cu_image_width && y < *cu_image_height) { cu_dest_image[global_index] = 0; } } } //Kernel rgb to grayscale function __global__ void rgbToGrayscaleKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image) { int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows unsigned char r, g, b, gray; if (x < *cu_image_width && y < *cu_image_height) { int grayOffset = (y * (*cu_image_width) + x); int rgbOffset = grayOffset * CHANNELS; b = cu_src_image[rgbOffset]; g = cu_src_image[rgbOffset + 1]; r = cu_src_image[rgbOffset + 2]; gray = 0.21 * r + 0.71 *g + 0.07 *b; cu_dest_image[grayOffset] = gray; } } //Kernel ColorChannel function __global__ void setColorChannelKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, unsigned char *cu_channel_to_keep) { int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows unsigned char r, g, b; if (x < *cu_image_width && y < *cu_image_height) { int offset = (y * (*cu_image_width) + x) * CHANNELS; switch (*cu_channel_to_keep) { case BLUECHANNEL: b = cu_src_image[offset]; g = 0; r = 0; break; case GREENCHANNEL: b = 0; g = cu_src_image[offset + 1]; r = 0; break; case REDCHANNEL: b = 0; g = 0; r = cu_src_image[offset + 2]; break; default: //Defaults to REDCHANNEL b = 0; g = 0; r = cu_src_image[offset + 2]; break; } cu_dest_image[offset] = b; //B cu_dest_image[offset + 1] = g; //G cu_dest_image[offset + 2] = r; //R } }; void setColorChannel(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image, unsigned char channel_to_keep) { int *d_image_width, *d_image_height; unsigned char *d_src_image, *d_dest_image, *d_channel_to_keep; unsigned int imgSize = (image_width * image_height) * CHANNELS * sizeof(unsigned char); hipError_t err = hipSuccess; //Set Device err = hipSetDevice(0); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_image_width, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipMalloc((void **)&d_image_height, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy channel to keep to gpu err = hipMalloc((void **)&d_channel_to_keep, sizeof(unsigned char)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_channel_to_keep, &channel_to_keep, sizeof(unsigned char), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line 
%d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = hipMalloc((void **)&d_src_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = hipMalloc((void **)&d_dest_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_dest_image, dest_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //Pro Grid N/16 Blcke, n = Anzahl Threads dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); setColorChannelKernel << <blocks_per_grid, threads_per_block >> >(d_image_width, d_image_height, d_src_image, d_dest_image, d_channel_to_keep); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. err = hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err); } err = hipMemcpy(dest_image, d_dest_image, imgSize, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipFree(d_image_width); hipFree(d_image_height); hipFree(d_channel_to_keep); hipFree(d_src_image); hipFree(d_dest_image); } void rgbToGrayscale(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image, *d_dest_image; unsigned int imgSizeRgb = (image_width * image_height) * CHANNELS * sizeof(unsigned char); unsigned int imgSizeGray = (image_width * image_height) * sizeof(unsigned char); hipError_t err = hipSuccess; //Set Device err = hipSetDevice(0); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_image_width, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipMalloc((void **)&d_image_height, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = hipMalloc((void **)&d_src_image, imgSizeRgb); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_src_image, src_image, imgSizeRgb, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", 
hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = hipMalloc((void **)&d_dest_image, imgSizeGray); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_dest_image, dest_image, imgSizeGray, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //N/16 blocks per grid dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); hipLaunchKernelGGL(( rgbToGrayscaleKernel ), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_image_width, d_image_height, d_src_image, d_dest_image); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. err = hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err); } err = hipMemcpy(dest_image, d_dest_image, imgSizeGray, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipFree(d_image_width); hipFree(d_image_height); hipFree(d_src_image); hipFree(d_dest_image); }; void sobelFilter(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image, *d_dest_image; unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char); hipError_t err = hipSuccess; //Set Device err = hipSetDevice(0); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_image_width, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipMalloc((void **)&d_image_height, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = hipMalloc((void **)&d_src_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = hipMalloc((void **)&d_dest_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_dest_image, dest_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads
dim3 threads_per_block(threads, threads, 1); //Per Grid N/16 Blocks dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); hipLaunchKernelGGL(( sobelFilterKernel) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_image_width, d_image_height, d_src_image, d_dest_image); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. err = hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err); exit(EXIT_FAILURE); } err = hipMemcpy(dest_image, d_dest_image, imgSize, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipFree(d_image_width); hipFree(d_image_height); hipFree(d_src_image); hipFree(d_dest_image); }; void sobelFilterShared(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image, *d_dest_image; unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char); hipError_t err = hipSuccess; //Set Device err = hipSetDevice(0); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_image_width, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipMalloc((void **)&d_image_height, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = hipMalloc((void **)&d_src_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = hipMalloc((void **)&d_dest_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_dest_image, dest_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } // Use a Grid with Blocks containing BLOCK_W x BLOCK_W Threads dim3 threads_per_block_tiled(BLOCK_W, BLOCK_W, 1); //Per Grid N/TILE_W blocks dim3 blocks_per_grid_tiled((image_width - 1) / TILE_W + 1, (image_height - 1) / TILE_W + 1, 1); hipLaunchKernelGGL(( sobelFilterKernelTiled ), dim3(blocks_per_grid_tiled), dim3(threads_per_block_tiled), 0, 0, d_image_width, d_image_height, d_src_image, d_dest_image); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch.
err = hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err); exit(EXIT_FAILURE); } err = hipMemcpy(dest_image, d_dest_image, imgSize, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipFree(d_image_width); hipFree(d_image_height); hipFree(d_src_image); hipFree(d_dest_image); }; void sobelFilterTexture(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) { int *d_image_width, *d_image_height; unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char); hipError_t err = hipSuccess; //Set Device err = hipSetDevice(0); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Create ChannelDesc //Sets output format of the value when the texture is fetched i.e. float texel hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned); //Create cuda array hipArray *cuArray; //Allocate cuda array err = hipMallocArray(&cuArray, &channelDesc, image_width, image_height); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image data to cuda array err = hipMemcpyToArray(cuArray, 0, 0, src_image, image_height * image_width * sizeof(unsigned char), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Set Texture struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = cuArray; //Set Texture object params struct hipTextureDesc textDesc; memset(&textDesc, 0, sizeof(textDesc)); textDesc.addressMode[0] = hipAddressModeMirror; textDesc.addressMode[1] = hipAddressModeMirror; textDesc.filterMode = hipFilterModeLinear; textDesc.readMode = hipReadModeNormalizedFloat; textDesc.normalizedCoords = 1; //Create Texture Object hipTextureObject_t texObj = 0; hipCreateTextureObject(&texObj, &resDesc, &textDesc, NULL); unsigned char *output; err = hipMalloc(&output, image_height * image_width * sizeof(unsigned char)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } // err = hipMalloc((void **)&d_image_width, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipMalloc((void **)&d_image_height, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } float angle = 0; unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //Per Grid N/16 Blocks dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); hipLaunchKernelGGL(( sobelFilterTexture) , 
dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_image_width, d_image_height, output, texObj, angle); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. err = hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err); exit(EXIT_FAILURE); } err = hipMemcpy(dest_image, output, imgSize, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } hipFree(d_image_width); hipFree(d_image_height); hipDestroyTextureObject(texObj); hipFreeArray(cuArray); hipFree(output); }; void getHistogramm(int image_width, int image_height, unsigned char *src_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image; unsigned int *d_dest_histogramm; unsigned int histogramm[HISTOGRAMMSIZE] = { 0 }; unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char); unsigned int histogrammSize = HISTOGRAMMSIZE * sizeof(unsigned int); hipError_t err = hipSuccess; //Set Device err = hipSetDevice(0); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_image_width, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipMalloc((void **)&d_image_height, sizeof(int)); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = hipMalloc((void **)&d_src_image, imgSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = hipMalloc((void **)&d_dest_histogramm, histogrammSize); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_dest_histogramm, histogramm, histogrammSize, hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //Per Grid N/16 Blocks dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); hipLaunchKernelGGL(( getHistogrammTiledKernel) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_image_width, d_image_height, d_src_image, d_dest_histogramm); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
err = hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err); exit(EXIT_FAILURE); } err = hipMemcpy(histogramm, d_dest_histogramm, histogrammSize, hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } long histoCount = 0; for (int i = 0; i < HISTOGRAMMSIZE; i++) { histoCount += histogramm[i]; } printf("HistogrammSize: %ld \n", histoCount); hipFree(d_dest_histogramm); hipFree(d_image_width); hipFree(d_image_height); hipFree(d_src_image); }; void streamAufgabe5(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image, *d_dest_image;// , *d_dest_image_sobel; unsigned int imgSize = image_width * image_height; unsigned int imgSizeRgb = imgSize * CHANNELS * sizeof(unsigned char); unsigned int imgSizeGray = imgSize * sizeof(unsigned char); //Cuda Stream vars const unsigned int stream_count = 4; //Kernel vars unsigned int threads = 16; int stream_width = image_width; int stream_height = image_height / stream_count; int stream_size = stream_width * stream_height; int stream_size_gray = stream_size * sizeof(unsigned char); int stream_size_rgb = stream_size * CHANNELS * sizeof(unsigned char); // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //N/16 blocks per grid dim3 blocks_per_grid((stream_width - 1) / threads + 1, (stream_height - 1) / threads + 1, 1); hipStream_t streams[stream_count]; int dev_count; hipDeviceProp_t prop; hipError_t err = hipSuccess; //Enable device Overlap err = hipGetDeviceCount(&dev_count); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Set device to a device with overlap property for (int i = 0; i < dev_count; i++) { hipGetDeviceProperties(&prop, i); if (prop.deviceOverlap) { err = hipSetDevice(i); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } } err = hipHostMalloc((void **)&d_image_width, sizeof(int), hipHostMallocDefault); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_width, &stream_width, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = hipHostMalloc((void **)&d_image_height, sizeof(int), hipHostMallocDefault); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(d_image_height, &stream_height, sizeof(int), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = hipHostMalloc((void **)&d_src_image, imgSizeRgb, hipHostMallocDefault); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Alloc memory for grayscale image err = hipHostMalloc((void **)&d_dest_image, imgSizeGray, hipHostMallocDefault); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Create cuda Streams & memory for each stream for (int i
= 0; i < stream_count; i++) { //Create cuda Streams err = hipStreamCreate(&streams[i]); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } //fill memory for (int i = 0; i < stream_count; i++) { //calc offset for memory copy int offset_gray = i * stream_size; int offset_rgb = offset_gray * CHANNELS; //copy memory for each stream err = hipMemcpyAsync(&d_src_image[offset_rgb], &src_image[offset_rgb], stream_size_rgb, hipMemcpyHostToDevice, streams[i]); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = hipMemcpyAsync(&d_dest_image[offset_gray], &dest_image[offset_gray], stream_size_gray, hipMemcpyHostToDevice, streams[i]); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } //execute kernels for (int i = 0; i < stream_count; i++) { int offset_gray = i * stream_size; int offset_rgb = offset_gray * CHANNELS; hipLaunchKernelGGL(( rgbToGrayscaleKernelStream), dim3(blocks_per_grid), dim3(threads_per_block), 0, streams[i], d_image_width, d_image_height, d_src_image, d_dest_image, offset_rgb, offset_gray); } for (int i = 0; i < stream_count; i++) { err = hipStreamSynchronize(streams[i]); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } for (int i = 0; i < stream_count; i++) { int offset = i * stream_size; //printf("offset: %d\n", offset); err = hipMemcpyAsync(&dest_image[offset], &d_dest_image[offset], stream_size_gray, hipMemcpyDeviceToHost, streams[i]); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } //wait for the device-to-host copies to finish before freeing the pinned buffers for (int i = 0; i < stream_count; i++) { err = hipStreamSynchronize(streams[i]); if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } hipHostFree(d_image_width); hipHostFree(d_image_height); hipHostFree(d_src_image); hipHostFree(d_dest_image); for (int i = 0; i < stream_count; i++) { hipStreamDestroy(streams[i]); } };
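The host wrappers in the file above repeat the same allocate/copy/check/exit sequence dozens of times. A macro in the same style would centralize that pattern; this is only a sketch (HIP_CHECK is a name invented here, not defined anywhere in these sources):

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Wraps any HIP runtime call, printing the error and exiting on failure,
// exactly like the inline checks used throughout the functions above.
#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        hipError_t e_ = (call);                                          \
        if (e_ != hipSuccess) {                                          \
            printf("%s in %s at line %d\n", hipGetErrorString(e_),       \
                   __FILE__, __LINE__);                                   \
            exit(EXIT_FAILURE);                                           \
        }                                                                  \
    } while (0)

// Usage, e.g.: HIP_CHECK(hipMalloc((void **)&d_src_image, imgSize));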
e46abfcec0b034ed92e17a8f3253ae57969ec827.cu
#include "cuda_runtime.h" #include "kernel.h" #include "fix.h" #include "device_launch_parameters.h" #include <device_functions.h> #include <device_atomic_functions.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> #include <math.h> #include <cstdlib> // #define CHANNELS 3 #define REDCHANNEL 'r' #define GREENCHANNEL 'g' #define BLUECHANNEL 'b' #define GRAYSCLAEREDCHANNEL 0.21 #define GRAYSCLAEGREENCHANNEL 0.71 #define GRAYSCLAEBLUECHANNEL 0.07 #define SOBEL_RADIUS 1 #define TILE_W 16 #define BLOCK_W (TILE_W + 2*SOBEL_RADIUS) #define ANGLE 50 #define HISTOGRAMMSIZE 256 //Kernel rgb to grayscale function with streams __global__ void rgbToGrayscaleKernelStream(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int offset_rgb, int offset_gray) { int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows unsigned char r, g, b, gray; if (x < *cu_image_width && y < *cu_image_height) { int offset = (y * (*cu_image_width) + x); int grayOffset = offset + offset_gray; int rgbOffset = offset_rgb + offset * CHANNELS; b = cu_src_image[rgbOffset]; g = cu_src_image[rgbOffset + 1]; r = cu_src_image[rgbOffset + 2]; gray = 0.21 * r + 0.71 *g + 0.07 *b; cu_dest_image[grayOffset] = gray; } } __global__ void getHistogrammTiledKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm) { __shared__ unsigned int smem[HISTOGRAMMSIZE]; int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows int shared_index = threadIdx.x + threadIdx.y * blockDim.x; int stride_shared = blockDim.x * blockDim.y; int stride_x = blockDim.x * gridDim.x; int stride_y = blockDim.y * gridDim.y; //Init shared memory histogramm with 0's for (int i = shared_index; i < HISTOGRAMMSIZE; i += stride_shared) { smem[i] = 0; } __syncthreads(); //Add data to histogramm in shared memory while (x < *cu_image_width && y < *cu_image_height) { int index = y * *cu_image_width + x; atomicAdd(&(smem[cu_src_image[index]]), 1); x += stride_x; y += stride_y; } __syncthreads(); /* long test = 0; if (threadIdx.x == 0) { for (int j = 0; j < HISTOGRAMMSIZE; j++) { test += smem[j]; } printf("Smem total per block: %lu \n", test); }*/ //Add shared memory histogramm part to global memory histogramm for (int i = shared_index; i < HISTOGRAMMSIZE; i+= stride_shared) { atomicAdd(&(cu_dest_histogramm[i]), smem[i]); } __syncthreads(); } __global__ void getHistogrammKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm) { int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows int stride_x = blockDim.x * gridDim.x; int stride_y = blockDim.y * gridDim.y; while (x < *cu_image_width && y < *cu_image_height) { int index = y * *cu_image_width + x; atomicAdd(&(cu_dest_histogramm[cu_src_image[index]]), 1); x += stride_x; y += stride_y; } } __global__ void sobelFilterTexture(int *cu_image_width, int *cu_image_height, unsigned char *cu_output, cudaTextureObject_t cu_texObj, float theta) { int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } }; int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < *cu_image_width - 1 && y < *cu_image_height - 1) { int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0; for (int j = -SOBEL_RADIUS; j 
<= SOBEL_RADIUS; j++) { for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) { //Calc normalized texture coordinates float u = (x + k) / (float)*cu_image_width; float v = (y + j) / (float)*cu_image_height; // Transform coordinates u -= 0.5f; v -= 0.5f; float tu = u * cosf(theta) - v * sinf(theta) + 0.5f; float tv = v * cosf(theta) + u * sinf(theta) + 0.5f; sobel_gradient_x += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; sobel_gradient_y += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; } } //Calc Sobel magnitude and save it to the image sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2)); cu_output[y * *cu_image_width + x] = (unsigned char)sobel_magnitude; } }; //Kernel sobel function __global__ void sobelFilterKernelTiled(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image) { __shared__ unsigned char ds_Img[BLOCK_W][BLOCK_W]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } }; int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int x = bx * TILE_W + tx - SOBEL_RADIUS; //cols int y = by * TILE_W + ty - SOBEL_RADIUS; //rows //Make sure x/y are not negative if (x < 0) { x = 0; } if (y < 0) { y = 0; } //Calc index of global memory int global_index = (y * (*cu_image_width) + x); //Load Data into Shared Memory //Insert 0 if the thread is supposed to fill the filter radius border of the tile if (x >= 0 && x < *cu_image_width - 1 && y >= 0 && y < *cu_image_height - 1) { ds_Img[ty][tx] = cu_src_image[global_index]; } else { ds_Img[ty][tx] = 0; } __syncthreads(); //Calc Sobel X & Y if the thread is inside the filter area if ((tx >= SOBEL_RADIUS) && (tx <= TILE_W) && (ty >= SOBEL_RADIUS) && (ty <= TILE_W)){ int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0; for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) { for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) { sobel_gradient_x += ds_Img[ty + j][tx + k] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; sobel_gradient_y += ds_Img[ty + j][tx + k] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; } } //Calc Sobel magnitude and save it to the original image sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2)); cu_dest_image[global_index] = (unsigned char)sobel_magnitude; } } __global__ void sobelFilterKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image) { int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } }; int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; int x = blockIdx.x * blockDim.x + threadIdx.x; //cols int y = blockIdx.y * blockDim.y + threadIdx.y; //rows //Calc index int global_index = (y * (*cu_image_width) + x); if (x >= SOBEL_RADIUS && x < *cu_image_width - 1 && y >= SOBEL_RADIUS && y < *cu_image_height - 1) { //Calc Sobel X & Y if the thread is inside the filter area int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0; for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) { for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) { sobel_gradient_x += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; sobel_gradient_y += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS]; } } //Calc Sobel magnitude and
		//Calc Sobel magnitude and save it to the image
		sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
		cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
	}
	else {
		if (x < *cu_image_width && y < *cu_image_height) {
			cu_dest_image[global_index] = 0;
		}
	}
}

//Kernel rgb to grayscale function
__global__ void rgbToGrayscaleKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
	int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
	int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
	unsigned char r, g, b, gray;
	if (x < *cu_image_width && y < *cu_image_height) {
		int grayOffset = (y * (*cu_image_width) + x);
		int rgbOffset = grayOffset * CHANNELS;
		b = cu_src_image[rgbOffset];
		g = cu_src_image[rgbOffset + 1];
		r = cu_src_image[rgbOffset + 2];
		gray = GRAYSCALEREDCHANNEL * r + GRAYSCALEGREENCHANNEL * g + GRAYSCALEBLUECHANNEL * b;
		cu_dest_image[grayOffset] = gray;
	}
}

//Kernel ColorChannel function
__global__ void setColorChannelKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, unsigned char *cu_channel_to_keep)
{
	int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
	int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
	unsigned char r, g, b;
	if (x < *cu_image_width && y < *cu_image_height) {
		int offset = (y * (*cu_image_width) + x) * CHANNELS;
		switch (*cu_channel_to_keep) {
		case BLUECHANNEL:
			b = cu_src_image[offset]; g = 0; r = 0;
			break;
		case GREENCHANNEL:
			b = 0; g = cu_src_image[offset + 1]; r = 0;
			break;
		case REDCHANNEL:
			b = 0; g = 0; r = cu_src_image[offset + 2];
			break;
		default: //Defaults to REDCHANNEL
			b = 0; g = 0; r = cu_src_image[offset + 2];
			break;
		}
		cu_dest_image[offset] = b; //B
		cu_dest_image[offset + 1] = g; //G
		cu_dest_image[offset + 2] = r; //R
	}
};

void setColorChannel(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image, unsigned char channel_to_keep) {
	int *d_image_width, *d_image_height;
	unsigned char *d_src_image, *d_dest_image, *d_channel_to_keep;
	unsigned int imgSize = (image_width * image_height) * CHANNELS * sizeof(unsigned char);
	cudaError_t err = cudaSuccess;
	//Set Device
	err = cudaSetDevice(0);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image width to gpu
	err = cudaMalloc((void **)&d_image_width, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image height to gpu
	err = cudaMalloc((void **)&d_image_height, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy channel to keep to gpu
	err = cudaMalloc((void **)&d_channel_to_keep, sizeof(unsigned char));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_channel_to_keep, &channel_to_keep, sizeof(unsigned char), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE); } //Copy image src to gpu err = cudaMalloc((void **)&d_src_image, imgSize); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = cudaMalloc((void **)&d_dest_image, imgSize); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_dest_image, dest_image, imgSize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //Pro Grid N/16 Blöcke, n = Anzahl Threads dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); setColorChannelKernel << <blocks_per_grid, threads_per_block >> >(d_image_width, d_image_height, d_src_image, d_dest_image, d_channel_to_keep); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. err = cudaDeviceSynchronize(); if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", err); } err = cudaMemcpy(dest_image, d_dest_image, imgSize, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } cudaFree(d_image_width); cudaFree(d_image_height); cudaFree(d_channel_to_keep); cudaFree(d_src_image); cudaFree(d_dest_image); } void rgbToGrayscale(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image, *d_dest_image; unsigned int imgSizeRgb = (image_width * image_height) * CHANNELS * sizeof(unsigned char); unsigned int imgSizeGray = (image_width * image_height) * sizeof(unsigned char); cudaError_t err = cudaSuccess; //Set Device err = cudaSetDevice(0); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMalloc((void **)&d_image_width, sizeof(int)); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = cudaMalloc((void **)&d_image_height, sizeof(int)); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = cudaMalloc((void **)&d_src_image, imgSizeRgb); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_src_image, src_image, imgSizeRgb, cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", 
cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image dest to gpu
	err = cudaMalloc((void **)&d_dest_image, imgSizeGray);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_dest_image, dest_image, imgSizeGray, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	unsigned int threads = 16;
	// Use a Grid with one Block containing 16x16 Threads
	dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid, n = number of threads
	dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
	//The launch was commented out in the original source, which left dest_image unchanged; re-enabled so the function actually converts
	rgbToGrayscaleKernel <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, d_src_image, d_dest_image);
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching rgbToGrayscaleKernel!\n", err); }
	err = cudaMemcpy(dest_image, d_dest_image, imgSizeGray, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	cudaFree(d_image_width);
	cudaFree(d_image_height);
	cudaFree(d_src_image);
	cudaFree(d_dest_image);
};

void sobelFilter(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) {
	int *d_image_width, *d_image_height;
	unsigned char *d_src_image, *d_dest_image;
	unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
	cudaError_t err = cudaSuccess;
	//Set Device
	err = cudaSetDevice(0);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMalloc((void **)&d_image_width, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image height to gpu
	err = cudaMalloc((void **)&d_image_height, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image src to gpu
	err = cudaMalloc((void **)&d_src_image, imgSize);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image dest to gpu
	err = cudaMalloc((void **)&d_dest_image, imgSize);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_dest_image, dest_image, imgSize, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	unsigned int threads = 16;
	// Use a Grid with one Block containing 16x16 Threads
	dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid
	dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
	sobelFilterKernel <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, d_src_image, d_dest_image);
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilterKernel!\n", err); exit(EXIT_FAILURE); }
	err = cudaMemcpy(dest_image, d_dest_image, imgSize, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	cudaFree(d_image_width);
	cudaFree(d_image_height);
	cudaFree(d_src_image);
	cudaFree(d_dest_image);
};

void sobelFilterShared(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) {
	int *d_image_width, *d_image_height;
	unsigned char *d_src_image, *d_dest_image;
	unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
	cudaError_t err = cudaSuccess;
	//Set Device
	err = cudaSetDevice(0);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMalloc((void **)&d_image_width, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image height to gpu
	err = cudaMalloc((void **)&d_image_height, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image src to gpu
	err = cudaMalloc((void **)&d_src_image, imgSize);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image dest to gpu
	err = cudaMalloc((void **)&d_dest_image, imgSize);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_dest_image, dest_image, imgSize, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	// Use a Grid with one Block containing BLOCK_W x BLOCK_W threads (tile plus halo)
	dim3 threads_per_block_tiled(BLOCK_W, BLOCK_W, 1);
	//N/TILE_W blocks per grid
	dim3 blocks_per_grid_tiled((image_width - 1) / TILE_W + 1, (image_height - 1) / TILE_W + 1, 1);
	sobelFilterKernelTiled <<<blocks_per_grid_tiled, threads_per_block_tiled >>>(d_image_width, d_image_height, d_src_image, d_dest_image);
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilterKernelTiled!\n", err); exit(EXIT_FAILURE); }
	err = cudaMemcpy(dest_image, d_dest_image, imgSize, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	cudaFree(d_image_width);
	cudaFree(d_image_height);
	cudaFree(d_src_image);
	cudaFree(d_dest_image);
};

void sobelFilterTexture(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) {
	int *d_image_width, *d_image_height;
	unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
	cudaError_t err = cudaSuccess;
	//Set Device
	err = cudaSetDevice(0);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Create ChannelDesc
	//Sets output format of the value when the texture is fetched i.e. float texel
	cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
	//Create cuda array
	cudaArray *cuArray;
	//Allocate cuda array
	err = cudaMallocArray(&cuArray, &channelDesc, image_width, image_height);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image data to cuda array
	//(cudaMemcpyToArray is deprecated in recent CUDA toolkits; cudaMemcpy2DToArray is the modern replacement)
	err = cudaMemcpyToArray(cuArray, 0, 0, src_image, image_height * image_width * sizeof(unsigned char), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Set Texture struct
	cudaResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = cudaResourceTypeArray;
	resDesc.res.array.array = cuArray;
	//Set Texture object params
	cudaTextureDesc textDesc;
	memset(&textDesc, 0, sizeof(textDesc));
	textDesc.addressMode[0] = cudaAddressModeMirror;
	textDesc.addressMode[1] = cudaAddressModeMirror;
	textDesc.filterMode = cudaFilterModeLinear;
	textDesc.readMode = cudaReadModeNormalizedFloat;
	textDesc.normalizedCoords = 1;
	//Create Texture Object
	cudaTextureObject_t texObj = 0;
	cudaCreateTextureObject(&texObj, &resDesc, &textDesc, NULL);
	unsigned char *output;
	err = cudaMalloc(&output, image_height * image_width * sizeof(unsigned char));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image width to gpu
	err = cudaMalloc((void **)&d_image_width, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image height to gpu
	err = cudaMalloc((void **)&d_image_height, sizeof(int));
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	float angle = 0; //rotation angle in radians passed to the kernel (the ANGLE define above is unused here)
	unsigned int threads = 16;
	// Use a Grid with one Block containing 16x16 Threads
	dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid
	dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1,
1); sobelFilterTexture <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, output, texObj, angle); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. err = cudaDeviceSynchronize(); if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", err); exit(EXIT_FAILURE); } err = cudaMemcpy(dest_image, output, imgSize, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } cudaFree(d_image_width); cudaFree(d_image_height); cudaDestroyTextureObject(texObj); cudaFreeArray(cuArray); cudaFree(output); }; void getHistogramm(int image_width, int image_height, unsigned char *src_image) { int *d_image_width, *d_image_height; unsigned char *d_src_image; unsigned int *d_dest_histogramm; unsigned int histogramm[HISTOGRAMMSIZE] = { 0 }; unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char); unsigned int histogrammSize = HISTOGRAMMSIZE * sizeof(unsigned int); cudaError_t err = cudaSuccess; //Set Device err = cudaSetDevice(0); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMalloc((void **)&d_image_width, sizeof(int)); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image height to gpu err = cudaMalloc((void **)&d_image_height, sizeof(int)); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image src to gpu err = cudaMalloc((void **)&d_src_image, imgSize); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } //Copy image dest to gpu err = cudaMalloc((void **)&d_dest_histogramm, histogrammSize); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(d_dest_histogramm, histogramm, histogrammSize, cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } unsigned int threads = 16; // Use a Grid with one Block containing 16x16 Threads dim3 threads_per_block(threads, threads, 1); //Per Grid N/16 Blocks dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1); getHistogrammTiledKernel <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, d_src_image, d_dest_histogramm); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching getHistogrammTiledKernel!\n", err); exit(EXIT_FAILURE); }
	err = cudaMemcpy(histogramm, d_dest_histogramm, histogrammSize, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//The sum over all bins must equal the number of pixels
	long histoCount = 0;
	for (int i = 0; i < HISTOGRAMMSIZE; i++) {
		histoCount += histogramm[i];
	}
	printf("Histogramm total count: %ld \n", histoCount);
	cudaFree(d_dest_histogramm);
	cudaFree(d_image_width);
	cudaFree(d_image_height);
	cudaFree(d_src_image);
};

void streamAufgabe5(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image) {
	int *d_image_width, *d_image_height;
	unsigned char *d_src_image, *d_dest_image;// , *d_dest_image_sobel;
	unsigned int imgSize = image_width * image_height;
	unsigned int imgSizeRgb = imgSize * CHANNELS * sizeof(unsigned char);
	unsigned int imgSizeGray = imgSize * sizeof(unsigned char);
	//Cuda Stream vars
	const unsigned int stream_count = 4;
	//Kernel vars
	unsigned int threads = 16;
	int stream_width = image_width;
	int stream_height = image_height / stream_count; //assumes image_height is divisible by stream_count
	int stream_size = stream_width * stream_height;
	int stream_size_gray = stream_size * sizeof(unsigned char);
	int stream_size_rgb = stream_size * CHANNELS * sizeof(unsigned char);
	// Use a Grid with one Block containing 16x16 Threads
	dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid
	dim3 blocks_per_grid((stream_width - 1) / threads + 1, (stream_height - 1) / threads + 1, 1);
	cudaStream_t streams[stream_count];
	int dev_count;
	cudaDeviceProp prop;
	cudaError_t err = cudaSuccess;
	//Query the devices, then pick one that supports copy/kernel overlap
	err = cudaGetDeviceCount(&dev_count);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Set device to a device with overlap property
	for (int i = 0; i < dev_count; i++) {
		cudaGetDeviceProperties(&prop, i);
		if (prop.deviceOverlap) {
			err = cudaSetDevice(i);
			if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
		}
	}
	//Note: cudaHostAlloc returns pinned HOST memory, not device memory. The usual
	//overlap pattern pins the host staging buffers and gives the device its own
	//cudaMalloc'd buffers; the allocations are kept as in the original here.
	err = cudaHostAlloc((void **)&d_image_width, sizeof(int), cudaHostAllocDefault);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_width, &stream_width, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image height to gpu
	err = cudaHostAlloc((void **)&d_image_height, sizeof(int), cudaHostAllocDefault);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	err = cudaMemcpy(d_image_height, &stream_height, sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Copy image src to gpu
	err = cudaHostAlloc((void **)&d_src_image, imgSizeRgb, cudaHostAllocDefault);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Alloc memory for grayscale image
	err = cudaHostAlloc((void **)&d_dest_image, imgSizeGray, cudaHostAllocDefault);
	if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	//Create cuda streams & memory copies for each stream
	for (int i = 0; i < stream_count; i++) {
		//Create cuda Streams
		err = cudaStreamCreate(&streams[i]);
		if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	}
	//fill memory
	for (int i = 0; i < stream_count; i++) {
		//calc offset for memory copy
		int offset_gray = i * stream_size;
		int offset_rgb = offset_gray * CHANNELS;
		//copy memory for each stream (src_image/dest_image are pageable host memory,
		//so these async copies degrade to synchronous ones; see the sketch after this function)
		err = cudaMemcpyAsync(&d_src_image[offset_rgb], &src_image[offset_rgb], stream_size_rgb, cudaMemcpyHostToDevice, streams[i]);
		if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
		err = cudaMemcpyAsync(&d_dest_image[offset_gray], &dest_image[offset_gray], stream_size_gray, cudaMemcpyHostToDevice, streams[i]);
		if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	}
	//execute kernels
	for (int i = 0; i < stream_count; i++) {
		int offset_gray = i * stream_size;
		int offset_rgb = offset_gray * CHANNELS;
		rgbToGrayscaleKernelStream<<<blocks_per_grid, threads_per_block, 0, streams[i]>>>(d_image_width, d_image_height, d_src_image, d_dest_image, offset_rgb, offset_gray);
	}
	for (int i = 0; i < stream_count; i++) {
		err = cudaStreamSynchronize(streams[i]);
		if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	}
	for (int i = 0; i < stream_count; i++) {
		int offset = i * stream_size;
		err = cudaMemcpyAsync(&dest_image[offset], &d_dest_image[offset], stream_size_gray, cudaMemcpyDeviceToHost, streams[i]);
		if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); }
	}
	//Wait for the device-to-host copies to finish before the pinned buffers are released
	//(the original freed them with copies potentially still in flight)
	for (int i = 0; i < stream_count; i++) {
		cudaStreamSynchronize(streams[i]);
	}
	cudaFreeHost(d_image_width);
	cudaFreeHost(d_image_height);
	cudaFreeHost(d_src_image);
	cudaFreeHost(d_dest_image);
	for (int i = 0; i < stream_count; i++) {
		cudaStreamDestroy(streams[i]);
	}
};
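// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the copy/compute overlap
// pattern that streamAufgabe5 above is aiming for. The kernel and all names
// here (halveKernel, halveWithStreams) are hypothetical; it assumes
// <cuda_runtime.h> and <string.h>. The key differences from the code above:
// the HOST staging buffers are the ones pinned with cudaHostAlloc (so
// cudaMemcpyAsync can actually run asynchronously), the device buffers come
// from cudaMalloc, and every stream is synchronized before buffers are freed.
// --------------------------------------------------------------------------
__global__ void halveKernel(const unsigned char *in, unsigned char *out, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) out[i] = in[i] / 2; //placeholder per-element operation
}

void halveWithStreams(const unsigned char *src, unsigned char *dst, int n)
{
	const int stream_count = 4;
	int chunk = n / stream_count; //assumes n is divisible by stream_count
	unsigned char *h_in, *h_out, *d_in, *d_out;
	cudaHostAlloc((void **)&h_in, n, cudaHostAllocDefault);  //pinned host staging buffers
	cudaHostAlloc((void **)&h_out, n, cudaHostAllocDefault);
	memcpy(h_in, src, n);
	cudaMalloc((void **)&d_in, n);                           //plain device buffers
	cudaMalloc((void **)&d_out, n);
	cudaStream_t streams[stream_count];
	for (int i = 0; i < stream_count; i++) cudaStreamCreate(&streams[i]);
	for (int i = 0; i < stream_count; i++) {
		int off = i * chunk;
		//H2D copy, kernel and D2H copy of chunk i are serialized within stream i,
		//but chunks in different streams can overlap with each other
		cudaMemcpyAsync(d_in + off, h_in + off, chunk, cudaMemcpyHostToDevice, streams[i]);
		halveKernel<<<(chunk + 255) / 256, 256, 0, streams[i]>>>(d_in + off, d_out + off, chunk);
		cudaMemcpyAsync(h_out + off, d_out + off, chunk, cudaMemcpyDeviceToHost, streams[i]);
	}
	for (int i = 0; i < stream_count; i++) {
		cudaStreamSynchronize(streams[i]); //wait before reading h_out or freeing anything
		cudaStreamDestroy(streams[i]);
	}
	memcpy(dst, h_out, n);
	cudaFree(d_in); cudaFree(d_out);
	cudaFreeHost(h_in); cudaFreeHost(h_out);
}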
d9a431e96fec2c228a3dc959fe5de0f8dd68391c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Copyright 2013-2014 METU, Middle East Technical University, Informatics Institute * * This file is part of H-TLD. * * H-TLD is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * H-TLD is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the one from Surrey University for TLD Algorithm developed by Zdenek Kalal. * If not, see <http://www.gnu.org/licenses/>. * Please contact Alptekin TEMIZEL for more info about * licensing atemizel@metu.edu.tr. * */ /* * mem_management.cu * * Author: Ilker GURCAN */ //TODO Don't Forget to Catch All Exceptions Properly... #define HETLD_EXPORTS #include<iostream> #include<stdio.h> #include<stdlib.h> #include<nppi.h> #include<device_launch_parameters.h> #include<opencv2/gpu/stream_accessor.hpp> #include "hetld_macros.hpp" #include "hetld_errors.hpp" #include "utilities.hpp" #include "mem_management.cuh" using namespace cv::gpu; __host__ MemoryManagement::MemoryManagement(int f_width, int f_height, double sigma) { hipError_t cuda_status; cv::gpu::StreamAccessor accessor; //Initialize Module Variables... _s_roi = (NppiSize*)malloc(sizeof(NppiSize)); _s_roi->width = f_width; _s_roi->height = f_height; _d_roi = (NppiSize*)malloc(sizeof(NppiSize)); _d_roi->height = f_height; _cv_cuda_stream = accessor.getStream(_cv_stream); //Check Out http://docs.opencv.org/modules/imgproc/doc/filtering.html#getgaussiankernel _gaussian_filter_gpu = createGaussianFilter_GPU(CV_8UC1, cv::Size((int)(6 * sigma) + 1, (int)(6 * sigma) + 1), sigma, 0.0, cv::BORDER_CONSTANT, cv::BORDER_CONSTANT); //Create an Aligned Data... _dev_cur_frame = nppiMalloc_8u_C1(f_width, f_height, &_d_roi->width); if(_dev_cur_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Current Frame", "Constructor", "nppiMalloc_8u_C1 Error"), mem_management, ER_MEM_MAN_CUDA_MALLOC); _dev_prev_frame = nppiMalloc_8u_C1(f_width, f_height, &_d_roi->width); if(_dev_prev_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Previous Frame", "Constructor", "nppiMalloc_8u_C1 Error"), mem_management, ER_MEM_MAN_CUDA_MALLOC); cuda_status = hipMalloc((void**)(&_dev_blurred_cur_frame), sizeof(Npp8u) * f_width * f_height); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Blurred Current Frame", "Constructor", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); //Allocate Other Required Resources... 
cuda_status = hipMalloc((void **)(&_dev_ii), sizeof(Npp32s) * (_d_roi->width + 1) * (_d_roi->height + 1)); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Integral Image", "Constructor", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); cuda_status = hipMalloc((void **)(&_dev_ii2), sizeof(Npp32s) * (_d_roi->width + 1) * (_d_roi->height + 1)); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Squared Integral Image", "Constructor", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); _host_ii = (Npp32s*)malloc(sizeof(Npp32s) * f_width * f_height); _host_ii2 = (Npp32s*)malloc(sizeof(Npp32s) * f_width * f_height); //In Order to Allocate it Once Per Process... if(_s_roi->width != _d_roi->width) { cuda_status = hipMalloc((void**)(&_dev_temp_frame_buf), sizeof(Npp8u) * f_width * f_height); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Temporary Frame Buffer", "Constructor", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); } else _dev_temp_frame_buf = NULL; } __host__ void findKLPForChangingDSF(int total_load, int* num_blocks, int* num_threads_per_block) { hipDeviceProp_t *dev_prop; int expected_num_of_blocks; dev_prop = getDeviceProps(0); *num_threads_per_block = findOptimalNumOfTPB(0); expected_num_of_blocks = (int)ceil(total_load / (float)(*num_threads_per_block)); //Some Resources Are Idle! Use As Much of Those Resources As You Can... if(expected_num_of_blocks < dev_prop->multiProcessorCount) { while(*num_threads_per_block > 64 && expected_num_of_blocks < dev_prop->multiProcessorCount) { (*num_threads_per_block) -= 32; expected_num_of_blocks = (int)ceil(total_load / (float)(*num_threads_per_block)); }//End of while-Loop... *num_blocks = expected_num_of_blocks; } else *num_blocks = expected_num_of_blocks; } //******************************************************************************** //***************************Moving BBOX Offs to GPU...*************************** //******************************************************************************** //0-)num_of_bboxes, //1-)total_item_count, //2-)stride //Constant Memory May Broadcast the Same Value to Multiple Thread Registers At Once! //Don't Define Them As Anything Other Than Arrays... It is What CUDA Means By Symbol! __constant__ int BBOX_INT_PARAMS[3]; __constant__ float ONE_OVER_NUM_BB_ATTRS[1]; __global__ void convertBBOXOffsToSoA(int* array_of_structures, int* structure_of_arrays) { int attr_index; int idx_to_soa; int i; for(i = blockIdx.x * blockDim.x + threadIdx.x; i < BBOX_INT_PARAMS[1]; i += BBOX_INT_PARAMS[2]) { attr_index = i - floor(i * ONE_OVER_NUM_BB_ATTRS[0]) * NUM_OF_BBOX_ATTRS_ON_GPU;// i % NUM_OF_BBOX_ATTRS idx_to_soa = attr_index * BBOX_INT_PARAMS[0] + //Ptr to Start Index of the Current Attribute floor(i * ONE_OVER_NUM_BB_ATTRS[0]);//Ptr to That Attribute of This BBOX structure_of_arrays[idx_to_soa] = array_of_structures[i]; }//End of for-Loop... 
} __host__ void MemoryManagement::moveBBOXOffsToGPU(int *load_balanced_bb_offs, int *dev_bbox_offs, int num_of_bboxes) { hipError_t cuda_status; int *d_offs; int num_blocks = 0; int num_threads_per_block = 0; int total_load; int consts[3]; float one_over_num_attrs; total_load = NUM_OF_BBOX_ATTRS_ON_GPU * num_of_bboxes; cuda_status = hipMalloc((void **)(&d_offs), sizeof(int) * total_load); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "BBOX Offs", "moveBBOXOffsToGPU", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); cuda_status = hipMemcpy((void*)d_offs, load_balanced_bb_offs, sizeof(int) * total_load, hipMemcpyHostToDevice); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "BBOX Offs", "Host", "Device", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); //Find Kernel Launch Parameters for High Occupancy... findKLPForChangingDSF(total_load, &num_blocks, &num_threads_per_block); if(num_threads_per_block < 64 || num_blocks <= 0) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_KERNEL_EXEC_CONFIG_PR), "convertBBOXOffsToSoA"), mem_management, ER_MEM_MAN_KERNEL_EXEC_CONFIG_PR); //Integer Constants... consts[0] = num_of_bboxes; consts[1] = total_load; consts[2] = num_blocks * num_threads_per_block; //Move it Without Casting! cuda_status = hipMemcpyToSymbol(BBOX_INT_PARAMS, consts, sizeof(int) * 3); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Integer Constants for Converting BBOX Offs to the Form of Structure of Arrays", "Host", "Device", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); one_over_num_attrs = 1.0 / (float)NUM_OF_BBOX_ATTRS_ON_GPU; //Move it Without Casting! cuda_status = hipMemcpyToSymbol(ONE_OVER_NUM_BB_ATTRS, &one_over_num_attrs, sizeof(float)); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Float Constant", "Host", "Device", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); //Now Move Them to GPU and Store Them in SoA Fashion... hipLaunchKernelGGL(( convertBBOXOffsToSoA), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_offs, dev_bbox_offs); hipDeviceSynchronize(); cuda_status = hipGetLastError(); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_KER_FAILED), "convertBBOXOffsToSoA", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_KER_FAILED); //Release Device Memory... 
hipFree((void*)d_offs); } //******************************************************************************** //*************************Moving Forests Offs to GPU...************************** //******************************************************************************** __host__ void MemoryManagement::moveForestsOffsToGPU(int *forests_offs, int *dev_forests_offs, int num_of_scale_levels, int num_of_trees, int num_of_features) { hipError_t cuda_status; cuda_status = hipMemcpy((void*)dev_forests_offs, (void*)forests_offs, num_of_scale_levels * num_of_trees * num_of_features * 2 * sizeof(int), hipMemcpyHostToDevice); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Forests Offs", "Host", "Device", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); } //******************************************************************************** //***************************Moving Cur-Frame to GPU...*************************** //******************************************************************************** __host__ void MemoryManagement::moveCurFrameToGPU(ubyte* cur_frame, bool is_keep_prev_on_gpu, bool is_gen_blurred_image) { hipError_t cuda_status; NppStatus npp_status; Npp8u *swap; if(_dev_cur_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Current Frame"), mem_management, ER_MEM_MAN_DEV_PTR_NULL); //*********************************************************************** //***Move Previous to Its Memory Location; Before Copying New Frame...*** //*********************************************************************** if(is_keep_prev_on_gpu) { if(_dev_prev_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Prev Frame"), mem_management, ER_MEM_MAN_DEV_PTR_NULL); swap = _dev_cur_frame; _dev_cur_frame = _dev_prev_frame; _dev_prev_frame = swap; }//End of Outermost if-Block... //*********************************************************************** //**********************Move Current Frame to GPU...********************* //*********************************************************************** if(_dev_temp_frame_buf != NULL) { //Copy Data to a Temporary Buffer in Order to Run NPPI Routine to //Copy it to Its Further Data Aligned Location... cuda_status = hipMemcpy((void*)_dev_temp_frame_buf, (void*)cur_frame, sizeof(Npp8u) * _s_roi->width * _s_roi->height, hipMemcpyHostToDevice); if(cuda_status != hipSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Temporary Buffer", "Host", "Device", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); if(is_gen_blurred_image) genBlurredFrame(_dev_temp_frame_buf); //Copy it Further to Its Final Destination... 
		npp_status = nppiCopyConstBorder_8u_C1R(_dev_temp_frame_buf, sizeof(Npp8u) * _s_roi->width, *(_s_roi), _dev_cur_frame, sizeof(Npp8u) * _d_roi->width, *(_d_roi), 0, 0, (Npp8u)0);
		if(npp_status != NPP_SUCCESS)
			throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_NPPI_MEM_CPY), "Current Frame", "Device", "Device", (int)npp_status), mem_management, ER_MEM_MAN_NPPI_MEM_CPY);
	} else {
		cuda_status = hipMemcpy((void*)_dev_cur_frame, (void*)cur_frame, sizeof(Npp8u) * _s_roi->width * _s_roi->height, hipMemcpyHostToDevice);
		if(cuda_status != hipSuccess)
			throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Current Frame", "Host", "Device", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY);
		//On this path _dev_temp_frame_buf is NULL (no extra alignment padding is needed),
		//so blur directly from the frame that was just copied
		if(is_gen_blurred_image) genBlurredFrame(_dev_cur_frame);
	}
	//Actually, It is not Necessary to Check Whether This Flag is On;
	//Because There would be No Command Issued to This CUDA Stream; hence No Waiting
	//in the Case That Flag is Off...
	if(is_gen_blurred_image) _cv_stream.waitForCompletion();
}

__host__ void MemoryManagement::genBlurredFrame(Npp8u *dev_cur_frame) {
	GpuMat cur_frame_gpu(_s_roi->height, _s_roi->width, CV_8UC1, (void*)dev_cur_frame);
	GpuMat blurred_cur_frame_gpu(_s_roi->height, _s_roi->width, CV_8UC1, (void*)_dev_blurred_cur_frame);
	_gaussian_filter_gpu->apply(cur_frame_gpu, blurred_cur_frame_gpu, cv::Rect(0, 0, cur_frame_gpu.cols, cur_frame_gpu.rows), _cv_stream);
}

//********************************************************************************
//*****************************Moving IIs to Host...******************************
//********************************************************************************
__host__ void MemoryManagement::moveIIsToHost() {
	hipError_t cuda_status;
	NppStatus npp_status;
	Npp32s *temp_buf;
	Npp32s *src_offset;
	//***************************Error Check...
	if(_dev_ii == NULL)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Device Ptr for II"), mem_management, ER_MEM_MAN_DEV_PTR_NULL);
	if(_dev_ii2 == NULL)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Device Ptr for II2"), mem_management, ER_MEM_MAN_DEV_PTR_NULL);
	//************************************Start Copying...
	//Copy Back Only the Region of Interest; Because
	//We Have Two Different Types of Paddings Within Image Data on Device:
	//One for the Sake of Data Alignment and
	//One for Extra Top and Left Borders Generated Intentionally
	//By NPPI's SqrIntegral Method...
	//We Are not Copying Them Concurrently; Because There Had to Be
	//An Additional Temporary Buffer to Be Used (Waste of Space)...
	//Plus, It Might Cause OutOfMemoryExceptions for Huge Frames...
	//Create Temporary Buffer in Order to Run NPPI Routine...
	cuda_status = hipMalloc((void**)(&temp_buf), sizeof(Npp32s) * _s_roi->width * _s_roi->height);
	if(cuda_status != hipSuccess)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Temporary Buffer", "moveIIsToHost", hipGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC);
	//Copy Region of Interest to Its Final Destination on GPU for II(xROI = 1, yROI = 1)...
	src_offset = _dev_ii + (_d_roi->width + 1) + 1;
	npp_status = nppiCopy_32s_C1R(src_offset, sizeof(Npp32s) * (_d_roi->width + 1), temp_buf, sizeof(Npp32s) * _s_roi->width, *(_s_roi));
	if(npp_status != NPP_SUCCESS)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_NPPI_MEM_CPY), "Integral Image", "Device", "Device", (int)npp_status), mem_management, ER_MEM_MAN_NPPI_MEM_CPY);
	//Now Copy II to Host...
cuda_status = hipMemcpy((void*)_host_ii, (void*)temp_buf, sizeof(Npp32s) * _s_roi->width * _s_roi->height, hipMemcpyDeviceToHost); //Copy Region of Interest to Its Final Destination on GPU for II2(xROI = 1, yROI = 1)... src_offset = _dev_ii2 + (_d_roi->width + 1) + 1; npp_status = nppiCopy_32s_C1R(src_offset, sizeof(Npp32s) * (_d_roi->width + 1), temp_buf, sizeof(Npp32s) * _s_roi->width, *(_s_roi)); if(npp_status != NPP_SUCCESS) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_NPPI_MEM_CPY), "Integral Image 2", "Device", "Device", (int)npp_status), mem_management, ER_MEM_MAN_NPPI_MEM_CPY); //Now Copy II2 to Host... cuda_status = hipMemcpy((void*)_host_ii2, (void*)temp_buf, sizeof(Npp32s) * _s_roi->width * _s_roi->height, hipMemcpyDeviceToHost); //Release Temporary Buffer... cuda_status = hipFree((void*)temp_buf); if(cuda_status != hipSuccess) fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE), "Temporary Buffer", "moveIIsToHost", hipGetErrorString(cuda_status)); } __host__ MemoryManagement::~MemoryManagement() { hipError_t cuda_status; free((void*)_s_roi); free((void*)_d_roi); //Deallocate All Allocated Resources... if(_dev_cur_frame != NULL) nppiFree((void*)_dev_cur_frame); if(_dev_prev_frame != NULL) nppiFree((void*)_dev_prev_frame); if(_dev_blurred_cur_frame != NULL) { cuda_status = hipFree((void*)_dev_blurred_cur_frame); if(cuda_status != hipSuccess) { fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE), "Blurred Current Frame", "Destructor", hipGetErrorString(cuda_status)); }//End of Innermost-if-Block... } if(_dev_ii != NULL) { cuda_status = hipFree((void*)_dev_ii); if(cuda_status != hipSuccess) { fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE), "Integral Image", "Destructor", hipGetErrorString(cuda_status)); }//End of Innermost-if-Block... } if(_dev_ii2 != NULL) { cuda_status = hipFree((void*)_dev_ii2); if(cuda_status != hipSuccess) { fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE), "Squared Integral Image", "Destructor", hipGetErrorString(cuda_status)); }//End of Innermost-if-Block... } if(_dev_temp_frame_buf != NULL) { cuda_status = hipFree((void*)_dev_temp_frame_buf); if(cuda_status != hipSuccess) { fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE), "Temporary Buffer", "Destructor", hipGetErrorString(cuda_status)); }//End of Innermost-if-Block... } if(_host_ii != NULL) free((void*)_host_ii); if(_host_ii2 != NULL) free((void*)_host_ii2); }
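// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the padded-row idea used
// above, where nppiMalloc_8u_C1 returns a row step wider than the logical
// width (stashed in _d_roi->width) and every copy has to honour that step.
// cudaMallocPitch / cudaMemcpy2D are the plain CUDA equivalents of the NPP
// calls (hipMallocPitch / hipMemcpy2D in the hipified dialect); the function
// name uploadPadded is illustrative.
// --------------------------------------------------------------------------
void uploadPadded(const unsigned char *h_img, int width, int height)
{
	unsigned char *d_img;
	size_t pitch; //bytes per padded device row, >= width
	cudaMallocPitch((void **)&d_img, &pitch, width, height);
	//the copy must use both pitches: host rows are tightly packed
	//(pitch == width), device rows are padded (pitch >= width)
	cudaMemcpy2D(d_img, pitch, h_img, width, width, height, cudaMemcpyHostToDevice);
	//... use d_img, remembering that row y starts at d_img + y * pitch ...
	cudaFree(d_img);
}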
d9a431e96fec2c228a3dc959fe5de0f8dd68391c.cu
/* * * Copyright 2013-2014 METU, Middle East Technical University, Informatics Institute * * This file is part of H-TLD. * * H-TLD is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * H-TLD is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with the one from Surrey University for TLD Algorithm developed by Zdenek Kalal. * If not, see <http://www.gnu.org/licenses/>. * Please contact Alptekin TEMIZEL for more info about * licensing atemizel@metu.edu.tr. * */ /* * mem_management.cu * * Author: Ilker GURCAN */ //TODO Don't Forget to Catch All Exceptions Properly... #define HETLD_EXPORTS #include<iostream> #include<stdio.h> #include<stdlib.h> #include<nppi.h> #include<device_launch_parameters.h> #include<opencv2/gpu/stream_accessor.hpp> #include "hetld_macros.hpp" #include "hetld_errors.hpp" #include "utilities.hpp" #include "mem_management.cuh" using namespace cv::gpu; __host__ MemoryManagement::MemoryManagement(int f_width, int f_height, double sigma) { cudaError cuda_status; cv::gpu::StreamAccessor accessor; //Initialize Module Variables... _s_roi = (NppiSize*)malloc(sizeof(NppiSize)); _s_roi->width = f_width; _s_roi->height = f_height; _d_roi = (NppiSize*)malloc(sizeof(NppiSize)); _d_roi->height = f_height; _cv_cuda_stream = accessor.getStream(_cv_stream); //Check Out http://docs.opencv.org/modules/imgproc/doc/filtering.html#getgaussiankernel _gaussian_filter_gpu = createGaussianFilter_GPU(CV_8UC1, cv::Size((int)(6 * sigma) + 1, (int)(6 * sigma) + 1), sigma, 0.0, cv::BORDER_CONSTANT, cv::BORDER_CONSTANT); //Create an Aligned Data... _dev_cur_frame = nppiMalloc_8u_C1(f_width, f_height, &_d_roi->width); if(_dev_cur_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Current Frame", "Constructor", "nppiMalloc_8u_C1 Error"), mem_management, ER_MEM_MAN_CUDA_MALLOC); _dev_prev_frame = nppiMalloc_8u_C1(f_width, f_height, &_d_roi->width); if(_dev_prev_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Previous Frame", "Constructor", "nppiMalloc_8u_C1 Error"), mem_management, ER_MEM_MAN_CUDA_MALLOC); cuda_status = cudaMalloc((void**)(&_dev_blurred_cur_frame), sizeof(Npp8u) * f_width * f_height); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Blurred Current Frame", "Constructor", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); //Allocate Other Required Resources... 
cuda_status = cudaMalloc((void **)(&_dev_ii), sizeof(Npp32s) * (_d_roi->width + 1) * (_d_roi->height + 1)); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Integral Image", "Constructor", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); cuda_status = cudaMalloc((void **)(&_dev_ii2), sizeof(Npp32s) * (_d_roi->width + 1) * (_d_roi->height + 1)); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Squared Integral Image", "Constructor", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); _host_ii = (Npp32s*)malloc(sizeof(Npp32s) * f_width * f_height); _host_ii2 = (Npp32s*)malloc(sizeof(Npp32s) * f_width * f_height); //In Order to Allocate it Once Per Process... if(_s_roi->width != _d_roi->width) { cuda_status = cudaMalloc((void**)(&_dev_temp_frame_buf), sizeof(Npp8u) * f_width * f_height); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Temporary Frame Buffer", "Constructor", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); } else _dev_temp_frame_buf = NULL; } __host__ void findKLPForChangingDSF(int total_load, int* num_blocks, int* num_threads_per_block) { cudaDeviceProp *dev_prop; int expected_num_of_blocks; dev_prop = getDeviceProps(0); *num_threads_per_block = findOptimalNumOfTPB(0); expected_num_of_blocks = (int)ceil(total_load / (float)(*num_threads_per_block)); //Some Resources Are Idle! Use As Much of Those Resources As You Can... if(expected_num_of_blocks < dev_prop->multiProcessorCount) { while(*num_threads_per_block > 64 && expected_num_of_blocks < dev_prop->multiProcessorCount) { (*num_threads_per_block) -= 32; expected_num_of_blocks = (int)ceil(total_load / (float)(*num_threads_per_block)); }//End of while-Loop... *num_blocks = expected_num_of_blocks; } else *num_blocks = expected_num_of_blocks; } //******************************************************************************** //***************************Moving BBOX Offs to GPU...*************************** //******************************************************************************** //0-)num_of_bboxes, //1-)total_item_count, //2-)stride //Constant Memory May Broadcast the Same Value to Multiple Thread Registers At Once! //Don't Define Them As Anything Other Than Arrays... It is What CUDA Means By Symbol! __constant__ int BBOX_INT_PARAMS[3]; __constant__ float ONE_OVER_NUM_BB_ATTRS[1]; __global__ void convertBBOXOffsToSoA(int* array_of_structures, int* structure_of_arrays) { int attr_index; int idx_to_soa; int i; for(i = blockIdx.x * blockDim.x + threadIdx.x; i < BBOX_INT_PARAMS[1]; i += BBOX_INT_PARAMS[2]) { attr_index = i - floor(i * ONE_OVER_NUM_BB_ATTRS[0]) * NUM_OF_BBOX_ATTRS_ON_GPU;// i % NUM_OF_BBOX_ATTRS idx_to_soa = attr_index * BBOX_INT_PARAMS[0] + //Ptr to Start Index of the Current Attribute floor(i * ONE_OVER_NUM_BB_ATTRS[0]);//Ptr to That Attribute of This BBOX structure_of_arrays[idx_to_soa] = array_of_structures[i]; }//End of for-Loop... 
} __host__ void MemoryManagement::moveBBOXOffsToGPU(int *load_balanced_bb_offs, int *dev_bbox_offs, int num_of_bboxes) { cudaError cuda_status; int *d_offs; int num_blocks = 0; int num_threads_per_block = 0; int total_load; int consts[3]; float one_over_num_attrs; total_load = NUM_OF_BBOX_ATTRS_ON_GPU * num_of_bboxes; cuda_status = cudaMalloc((void **)(&d_offs), sizeof(int) * total_load); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "BBOX Offs", "moveBBOXOffsToGPU", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC); cuda_status = cudaMemcpy((void*)d_offs, load_balanced_bb_offs, sizeof(int) * total_load, cudaMemcpyHostToDevice); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "BBOX Offs", "Host", "Device", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); //Find Kernel Launch Parameters for High Occupancy... findKLPForChangingDSF(total_load, &num_blocks, &num_threads_per_block); if(num_threads_per_block < 64 || num_blocks <= 0) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_KERNEL_EXEC_CONFIG_PR), "convertBBOXOffsToSoA"), mem_management, ER_MEM_MAN_KERNEL_EXEC_CONFIG_PR); //Integer Constants... consts[0] = num_of_bboxes; consts[1] = total_load; consts[2] = num_blocks * num_threads_per_block; //Move it Without Casting! cuda_status = cudaMemcpyToSymbol(BBOX_INT_PARAMS, consts, sizeof(int) * 3); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Integer Constants for Converting BBOX Offs to the Form of Structure of Arrays", "Host", "Device", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); one_over_num_attrs = 1.0 / (float)NUM_OF_BBOX_ATTRS_ON_GPU; //Move it Without Casting! cuda_status = cudaMemcpyToSymbol(ONE_OVER_NUM_BB_ATTRS, &one_over_num_attrs, sizeof(float)); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Float Constant", "Host", "Device", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); //Now Move Them to GPU and Store Them in SoA Fashion... convertBBOXOffsToSoA<<<num_blocks, num_threads_per_block>>>(d_offs, dev_bbox_offs); cudaDeviceSynchronize(); cuda_status = cudaGetLastError(); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_KER_FAILED), "convertBBOXOffsToSoA", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_KER_FAILED); //Release Device Memory... 
cudaFree((void*)d_offs); } //******************************************************************************** //*************************Moving Forests Offs to GPU...************************** //******************************************************************************** __host__ void MemoryManagement::moveForestsOffsToGPU(int *forests_offs, int *dev_forests_offs, int num_of_scale_levels, int num_of_trees, int num_of_features) { cudaError cuda_status; cuda_status = cudaMemcpy((void*)dev_forests_offs, (void*)forests_offs, num_of_scale_levels * num_of_trees * num_of_features * 2 * sizeof(int), cudaMemcpyHostToDevice); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Forests Offs", "Host", "Device", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); } //******************************************************************************** //***************************Moving Cur-Frame to GPU...*************************** //******************************************************************************** __host__ void MemoryManagement::moveCurFrameToGPU(ubyte* cur_frame, bool is_keep_prev_on_gpu, bool is_gen_blurred_image) { cudaError cuda_status; NppStatus npp_status; Npp8u *swap; if(_dev_cur_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Current Frame"), mem_management, ER_MEM_MAN_DEV_PTR_NULL); //*********************************************************************** //***Move Previous to Its Memory Location; Before Copying New Frame...*** //*********************************************************************** if(is_keep_prev_on_gpu) { if(_dev_prev_frame == NULL) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Prev Frame"), mem_management, ER_MEM_MAN_DEV_PTR_NULL); swap = _dev_cur_frame; _dev_cur_frame = _dev_prev_frame; _dev_prev_frame = swap; }//End of Outermost if-Block... //*********************************************************************** //**********************Move Current Frame to GPU...********************* //*********************************************************************** if(_dev_temp_frame_buf != NULL) { //Copy Data to a Temporary Buffer in Order to Run NPPI Routine to //Copy it to Its Further Data Aligned Location... cuda_status = cudaMemcpy((void*)_dev_temp_frame_buf, (void*)cur_frame, sizeof(Npp8u) * _s_roi->width * _s_roi->height, cudaMemcpyHostToDevice); if(cuda_status != cudaSuccess) throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Temporary Buffer", "Host", "Device", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY); if(is_gen_blurred_image) genBlurredFrame(_dev_temp_frame_buf); //Copy it Further to Its Final Destination... 
		npp_status = nppiCopyConstBorder_8u_C1R(_dev_temp_frame_buf, sizeof(Npp8u) * _s_roi->width, *(_s_roi), _dev_cur_frame, sizeof(Npp8u) * _d_roi->width, *(_d_roi), 0, 0, (Npp8u)0);
		if(npp_status != NPP_SUCCESS)
			throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_NPPI_MEM_CPY), "Current Frame", "Device", "Device", (int)npp_status), mem_management, ER_MEM_MAN_NPPI_MEM_CPY);
	} else {
		cuda_status = cudaMemcpy((void*)_dev_cur_frame, (void*)cur_frame, sizeof(Npp8u) * _s_roi->width * _s_roi->height, cudaMemcpyHostToDevice);
		if(cuda_status != cudaSuccess)
			throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY), "Current Frame", "Host", "Device", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MEM_CPY);
		//On this path _dev_temp_frame_buf is NULL (no extra alignment padding is needed),
		//so blur directly from the frame that was just copied
		if(is_gen_blurred_image) genBlurredFrame(_dev_cur_frame);
	}
	//Actually, It is not Necessary to Check Whether This Flag is On;
	//Because There would be No Command Issued to This CUDA Stream; hence No Waiting
	//in the Case That Flag is Off...
	if(is_gen_blurred_image) _cv_stream.waitForCompletion();
}

__host__ void MemoryManagement::genBlurredFrame(Npp8u *dev_cur_frame) {
	GpuMat cur_frame_gpu(_s_roi->height, _s_roi->width, CV_8UC1, (void*)dev_cur_frame);
	GpuMat blurred_cur_frame_gpu(_s_roi->height, _s_roi->width, CV_8UC1, (void*)_dev_blurred_cur_frame);
	_gaussian_filter_gpu->apply(cur_frame_gpu, blurred_cur_frame_gpu, cv::Rect(0, 0, cur_frame_gpu.cols, cur_frame_gpu.rows), _cv_stream);
}

//********************************************************************************
//*****************************Moving IIs to Host...******************************
//********************************************************************************
__host__ void MemoryManagement::moveIIsToHost() {
	cudaError cuda_status;
	NppStatus npp_status;
	Npp32s *temp_buf;
	Npp32s *src_offset;
	//***************************Error Check...
	if(_dev_ii == NULL)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Device Ptr for II"), mem_management, ER_MEM_MAN_DEV_PTR_NULL);
	if(_dev_ii2 == NULL)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_DEV_PTR_NULL), "Device Ptr for II2"), mem_management, ER_MEM_MAN_DEV_PTR_NULL);
	//************************************Start Copying...
	//Copy Back Only the Region of Interest; Because
	//We Have Two Different Types of Paddings Within Image Data on Device:
	//One for the Sake of Data Alignment and
	//One for Extra Top and Left Borders Generated Intentionally
	//By NPPI's SqrIntegral Method...
	//We Are not Copying Them Concurrently; Because There Had to Be
	//An Additional Temporary Buffer to Be Used (Waste of Space)...
	//Plus, It Might Cause OutOfMemoryExceptions for Huge Frames...
	//Create Temporary Buffer in Order to Run NPPI Routine...
	cuda_status = cudaMalloc((void**)(&temp_buf), sizeof(Npp32s) * _s_roi->width * _s_roi->height);
	if(cuda_status != cudaSuccess)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MALLOC), "Temporary Buffer", "moveIIsToHost", cudaGetErrorString(cuda_status)), mem_management, ER_MEM_MAN_CUDA_MALLOC);
	//Copy Region of Interest to Its Final Destination on GPU for II(xROI = 1, yROI = 1)...
	src_offset = _dev_ii + (_d_roi->width + 1) + 1;
	npp_status = nppiCopy_32s_C1R(src_offset, sizeof(Npp32s) * (_d_roi->width + 1), temp_buf, sizeof(Npp32s) * _s_roi->width, *(_s_roi));
	if(npp_status != NPP_SUCCESS)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_NPPI_MEM_CPY), "Integral Image", "Device", "Device", (int)npp_status), mem_management, ER_MEM_MAN_NPPI_MEM_CPY);
	//Now Copy II to Host...
	cuda_status = cudaMemcpy((void*)_host_ii, (void*)temp_buf,
	                         sizeof(Npp32s) * _s_roi->width * _s_roi->height,
	                         cudaMemcpyDeviceToHost);
	if(cuda_status != cudaSuccess)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY),
		                        "Integral Image", "Device", "Host",
		                        cudaGetErrorString(cuda_status)),
		                 mem_management, ER_MEM_MAN_CUDA_MEM_CPY);

	//Copy Region of Interest to Its Final Destination on GPU for II2(xROI = 1, yROI = 1)...
	src_offset = _dev_ii2 + (_d_roi->width + 1) + 1;
	npp_status = nppiCopy_32s_C1R(src_offset, sizeof(Npp32s) * (_d_roi->width + 1),
	                              temp_buf, sizeof(Npp32s) * _s_roi->width, *(_s_roi));
	if(npp_status != NPP_SUCCESS)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_NPPI_MEM_CPY),
		                        "Integral Image 2", "Device", "Device", (int)npp_status),
		                 mem_management, ER_MEM_MAN_NPPI_MEM_CPY);

	//Now Copy II2 to Host...
	cuda_status = cudaMemcpy((void*)_host_ii2, (void*)temp_buf,
	                         sizeof(Npp32s) * _s_roi->width * _s_roi->height,
	                         cudaMemcpyDeviceToHost);
	if(cuda_status != cudaSuccess)
		throw HETLDError(format(GETMEMERROR(ER_MEM_MAN_CUDA_MEM_CPY),
		                        "Integral Image 2", "Device", "Host",
		                        cudaGetErrorString(cuda_status)),
		                 mem_management, ER_MEM_MAN_CUDA_MEM_CPY);

	//Release Temporary Buffer...
	cuda_status = cudaFree((void*)temp_buf);
	if(cuda_status != cudaSuccess)
		fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE),
		        "Temporary Buffer", "moveIIsToHost", cudaGetErrorString(cuda_status));
}

__host__ MemoryManagement::~MemoryManagement()
{
	cudaError cuda_status;

	free((void*)_s_roi);
	free((void*)_d_roi);

	//Deallocate All Allocated Resources...
	if(_dev_cur_frame != NULL)
		nppiFree((void*)_dev_cur_frame);
	if(_dev_prev_frame != NULL)
		nppiFree((void*)_dev_prev_frame);

	if(_dev_blurred_cur_frame != NULL)
	{
		cuda_status = cudaFree((void*)_dev_blurred_cur_frame);
		if(cuda_status != cudaSuccess)
		{
			fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE),
			        "Blurred Current Frame", "Destructor", cudaGetErrorString(cuda_status));
		}//End of Innermost-if-Block...
	}

	if(_dev_ii != NULL)
	{
		cuda_status = cudaFree((void*)_dev_ii);
		if(cuda_status != cudaSuccess)
		{
			fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE),
			        "Integral Image", "Destructor", cudaGetErrorString(cuda_status));
		}//End of Innermost-if-Block...
	}

	if(_dev_ii2 != NULL)
	{
		cuda_status = cudaFree((void*)_dev_ii2);
		if(cuda_status != cudaSuccess)
		{
			fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE),
			        "Squared Integral Image", "Destructor", cudaGetErrorString(cuda_status));
		}//End of Innermost-if-Block...
	}

	if(_dev_temp_frame_buf != NULL)
	{
		cuda_status = cudaFree((void*)_dev_temp_frame_buf);
		if(cuda_status != cudaSuccess)
		{
			fprintf(stderr, GETMEMERROR(ER_MEM_MAN_CUDA_FREE),
			        "Temporary Buffer", "Destructor", cudaGetErrorString(cuda_status));
		}//End of Innermost-if-Block...
	}

	if(_host_ii != NULL)
		free((void*)_host_ii);
	if(_host_ii2 != NULL)
		free((void*)_host_ii2);
}
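/*
 * For reference, a minimal stand-alone sketch (NOT part of HETLD) of the
 * padding step moveCurFrameToGPU performs above: an unpadded source frame is
 * embedded into a larger, pitch-aligned destination ROI with
 * nppiCopyConstBorder_8u_C1R and a constant zero border. The frame sizes, the
 * (1, 1) border offset, and the main() harness are illustrative assumptions.
 */
#include <nppi.h>
#include <cstdio>

int main()
{
	NppiSize s_roi = {640, 480};   //Source Frame Size (Assumption)...
	NppiSize d_roi = {642, 482};   //Destination With a 1-Pixel Border (Assumption)...
	int s_step = 0, d_step = 0;

	//nppiMalloc Returns Pitch-Aligned Device Memory; the Pitch (in Bytes) Comes Back in *_step...
	Npp8u *dev_src = nppiMalloc_8u_C1(s_roi.width, s_roi.height, &s_step);
	Npp8u *dev_dst = nppiMalloc_8u_C1(d_roi.width, d_roi.height, &d_step);

	//(Fill dev_src Here, e.g. With cudaMemcpy2D, Using s_step as the Destination Pitch...)

	//Embed the Source at Offset (1, 1) Inside the Destination; Fill the Border With Zero...
	NppStatus npp_status = nppiCopyConstBorder_8u_C1R(dev_src, s_step, s_roi,
	                                                  dev_dst, d_step, d_roi,
	                                                  1, 1, (Npp8u)0);
	if(npp_status != NPP_SUCCESS)
		fprintf(stderr, "nppiCopyConstBorder_8u_C1R failed: %d\n", (int)npp_status);

	nppiFree((void*)dev_src);
	nppiFree((void*)dev_dst);

	return 0;
}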
1a4022cb3424af02f787094f98b79b3a074e8746.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_utils.h" #include "macros.hpp" #include <ATen/ExpandUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPUtils.h> #include <c10/core/ScalarType.h> #include <stdio.h> #include <torch/extension.h> /* return the indice of current point in the idxList -1 outside >= 0 inside */ template <typename indice_t> __device__ void is_inside(const int topK, const indice_t *__restrict__ idxList, const indice_t curr_Idx, int *curK) { for (size_t i = 0; i < topK; i++) { // a pixel is inside the splat if idxList contains point index if (idxList[i] == curr_Idx) { *curK = i; return; } // a pixel definitely isn't inside a splat if it's not occupied by any point if (idxList[i] == -1) { *curK = -1; return; } } *curK = -1; return; } /* compute pixel color after removing a point from a merged pixel */ // TODO curPointList probably no necessary, since rhoList and WsList will be // zero at curPointList[k] == -1 template <typename scalar_t, typename indice_t> __device__ void after_removal(const int numColors, const int topK, const int curK, const scalar_t depthThres, const scalar_t *depthList, const indice_t *curPointList, // topK const uint8_t *curIsBehind, // topK const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *curPixel, // numColors scalar_t *newColors, // numColors scalar_t *newDepth) { // initialize color with 0.0 for (size_t c = 0; c < numColors; c++) { newColors[c] = 0.0; } // initialize depth with the farthest so far *newDepth = depthList[topK - 1]; scalar_t sumRho = 0.0; int numVisible = 0; for (size_t k = 0; k < topK; k++) { if (curIsBehind[k] == 0) ++numVisible; } // if it's the only visible point, then removing it will reveal the // color below assert(numVisible >= 0); if (numVisible == 1) { sumRho = 0.0; // CHECK: should be the second? 
scalar_t curDepth = depthList[1]; { size_t k = curK + 1; while (k < topK) { // as soon as idxList is -1 or depth > currentDepth+threshold // stop accumulating colors if (curPointList[k] == -1) { break; } if ((depthList[k] - curDepth) > depthThres) { break; } for (size_t c = 0; c < numColors; c++) { newColors[c] += wsList[k * numColors + c] * rhoList[k]; } sumRho += rhoList[k]; if (depthList[k] < *newDepth) { *newDepth = depthList[k]; } ++k; } } if (sumRho == 0) sumRho = 1.0; for (size_t c = 0; c < numColors; c++) { newColors[c] /= sumRho; } return; } // not the only point visible: // removing current point involves reweighting rhos for (size_t k = 0; k < numVisible; k++) { if (k == curK) { continue; } for (size_t c = 0; c < numColors; c++) { newColors[c] += wsList[k * numColors + c] * rhoList[k]; } sumRho += rhoList[k]; if (depthList[k] < *newDepth) { *newDepth = depthList[k]; } } if (sumRho == 0) sumRho = 1.0; for (size_t c = 0; c < numColors; c++) { newColors[c] /= sumRho; } assert(sumRho > 0); return; } /* compute pixel color after moving a point to a merged pixel */ template <typename scalar_t> __device__ void after_addition(const int numColors, const int topK, const scalar_t rho, const scalar_t *ws, const scalar_t pointDepth, const scalar_t depthThres, const scalar_t *depthList, const uint8_t *curIsBehind, // topK const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *curPixel, // numColors scalar_t *newColors, // numColors scalar_t *newDepth) { scalar_t sumRho = rho; for (size_t k = 0; k < topK; k++) { if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth break; } sumRho += rhoList[k]; } if (sumRho == 0) sumRho = 1.0; for (size_t c = 0; c < numColors; c++) { newColors[c] = rho / sumRho * ws[c]; } for (size_t k = 0; k < topK; k++) { for (size_t c = 0; c < numColors; c++) { if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth break; } newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c]; } } *newDepth = min(depthList[0], pointDepth); } /* compute pixel color after moving a point closer to the screen */ template <typename scalar_t> __device__ void after_drawing_closer(const int numColors, const int topK, const int curK, const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *depthList, // topK const uint8_t *isBehind, // topK scalar_t *newColors, scalar_t *newDepth) { scalar_t curRho = rhoList[curK]; const scalar_t *curW = wsList + curK * numColors; scalar_t pointDepth = depthList[curK]; scalar_t sumRho = curRho; for (size_t k = 0; k < topK; k++) { if (isBehind[k] > 0) { break; } sumRho += rhoList[k]; } // should at least have curRho assert(sumRho > 0); for (size_t c = 0; c < numColors; c++) { newColors[c] = curRho / sumRho * curW[c]; } for (size_t k = 0; k < topK; k++) { for (size_t c = 0; c < numColors; c++) { if (isBehind[k] > 0) { break; } newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c]; } } *newDepth = min(depthList[0], pointDepth); } template <typename scalar_t> __device__ scalar_t eps_guard(scalar_t v) { // const scalar_t eps = 0.01; const scalar_t eps = 1e-5; if (v < 0) { return v - eps; } if (v >= 0) { return v + eps; } return v; } /* */ template <typename scalar_t, typename indice_t> __global__ void visibility_debug_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, 
const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const bool considerZ, const indice_t verboseLogIdx, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz, // BxNx1 gradients for z scalar_t *__restrict__ debugTensor // BxHxWx8 log gradients for verboseLogIdx ) { // const scalar_t mergeT = scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t curPointIdx = b * PN + p; const bool logPoint = (curPointIdx == verboseLogIdx); // skip point (gradient=0) if mask == 1 (i.e. point is good) scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); // scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]); // scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]); const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t rhov = rhoValues[curPointIdx]; const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t deltaI = 0; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value if (curPointDepth - newDepth > mergeT) { if (!considerZ) { continue; } scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL / imgWidth * 2 * curPointDepth; scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL / imgHeight * 2 * curPointDepth; assert(newDepth < curPointDepth); scalar_t dz_3d = newDepth - curPointDepth; scalar_t distance2_3d = eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d); scalar_t distance2 = eps_guard(dx * dx + dy * dy); didzv = dldI / distance2_3d * dz_3d; // should rescale to screen space didxv = dldI / distance2 * dx; didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); if (logPoint) { debugTensor[curPixelIdx * 8] = deltaI / distance2 * dx; debugTensor[curPixelIdx * 8 + 1] = deltaI / distance2 * dy; debugTensor[curPixelIdx * 8 + 2] = deltaI / distance2_3d * dz_3d; } } // don't need to change z else { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance2 = eps_guard(dx * dx + dy * dy); // dIdx didxv = dldI / distance2 * dx; // dIdy didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); if (logPoint) { debugTensor[curPixelIdx * 8] = deltaI / distance2 * dx; debugTensor[curPixelIdx * 8 + 1] = deltaI / distance2 * dy; debugTensor[curPixelIdx * 8 + 2] = 0; } } } } // pixel inside splat else { // is the current point shown? 
if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0) { // dIdp = (dIdp+) + (dIdp-) scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy)); scalar_t rx = curProjValues[0] - xmin; scalar_t ry = curProjValues[1] - ymin; assert(rx > 0); assert(ry > 0); scalar_t r = max(rx, ry); didxv = dldI * dx / eps_guard((r + distance) * distance) + dldI * dx / eps_guard((distance - r) * distance); didyv = dldI * dy / eps_guard((r + distance) * distance) + dldI * dy / eps_guard((distance - r) * distance); assert(!isnan(didxv)); assert(!isnan(didyv)); if (logPoint) { debugTensor[curPixelIdx * 8] = deltaI * dx / eps_guard((r + distance) * distance) + deltaI * dx / eps_guard((distance - r) * distance); debugTensor[curPixelIdx * 8 + 1] = deltaI * dy / eps_guard((r + distance) * distance) + deltaI * dy / eps_guard((distance - r) * distance); debugTensor[curPixelIdx * 8 + 2] = 0; } } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0.0) { didzv = dldI / eps_guard(newDepth - curPointDepth); } if (logPoint) { debugTensor[curPixelIdx * 8] = 0; debugTensor[curPixelIdx * 8 + 1] = 0; debugTensor[curPixelIdx * 8 + 2] = deltaI / eps_guard(newDepth - curPointDepth); } } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; if (logPoint) { // debugTensor[curPixelIdx * 8] = didxv; // debugTensor[curPixelIdx * 8 + 1] = didyv; // debugTensor[curPixelIdx * 8 + 2] = didzv; debugTensor[curPixelIdx * 8 + 3] = dldI; for (size_t c = 0; c < WDim; c++) { debugTensor[curPixelIdx * 8 + 4 + c] = newColors[c]; } debugTensor[curPixelIdx * 8 + 7] = newDepth; } } // imWidth } // imHeight } // point } // batch } /* */ template <typename scalar_t, typename indice_t> __global__ void visibility_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const bool considerZ, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz) // BxNx1 gradients for z { // const scalar_t 
mergeT = scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t curPointIdx = b * PN + p; const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t rhov = rhoValues[curPointIdx]; scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value if (curPointDepth - newDepth > mergeT) { if (!considerZ) { continue; } scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL / imgWidth * 2 * curPointDepth; scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL / imgHeight * 2 * curPointDepth; assert(newDepth < curPointDepth); scalar_t dz_3d = newDepth - curPointDepth; scalar_t distance2_3d = eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d); scalar_t distance2 = eps_guard(dx * dx + dy * dy); didzv = dldI / distance2_3d * dz_3d; // should rescale to screen space didxv = dldI / distance2 * dx; didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); } // don't need to change z else { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance2 = eps_guard(dx * dx + dy * dy); // dIdx didxv = dldI / distance2 * dx; // dIdy didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); } } } // pixel inside splat else { // is the current point shown? if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0) { // dIdp = (dIdp+) + (dIdp-) scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy)); scalar_t rx = curProjValues[0] - xmin; scalar_t ry = curProjValues[1] - ymin; assert(rx > 0); assert(ry > 0); scalar_t r = max(rx, ry); didxv = dldI * dx / eps_guard((r + distance) * distance) + dldI * dx / eps_guard((distance - r) * distance); didyv = dldI * dy / eps_guard((r + distance) * distance) + dldI * dy / eps_guard((distance - r) * distance); assert(!isnan(didxv)); assert(!isnan(didyv)); } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0.0) { didzv = dldI / eps_guard(newDepth - curPointDepth); } } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; } // imWidth } // imHeight } // point } // batch } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_backward_cuda(const double focalLength, const double mergeThres, const bool considerZ, const int localHeight, const int localWidth, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, 
// BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); const int projDim = projPoints.size(2); CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); // initialize with zeros dIdp.zero_(); dIdz.zero_(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.scalar_type(), "visibility_backward_kernel", ([&] { hipLaunchKernelGGL(( visibility_backward_kernel<scalar_t, int64_t>) , dim3(dim3(batchSize, n_blocks, 1)), dim3(n_threads), 0, stream, batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, focalLength, mergeThres, considerZ, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>() // BxNx1 ); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { printf("compute_visiblity_maps_cuda kernel failed: %s\n", hipGetErrorString(err)); exit(-1); } return output; } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_debug_backward_cuda(const double focalLength, const double mergeThres, const bool considerZ, const int localHeight, const int localWidth, const int logIdx, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, // BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); 
const int projDim = projPoints.size(2); CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); const int64_t verboseLogIdx = int64_t(logIdx); // initialize with zeros dIdp.zero_(); dIdz.zero_(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // dIdpMap = outputs[:, :, :, :2] // dIdzMap = outputs[:, :, :, 2] // dldI = outputs[:, :, :, 3] // newColor = outputs[:, :, :, 4:7] // newDepth = outputs[:, :, :, 7] at::Tensor debugTensor = at::zeros({batchSize, imgHeight, imgWidth, 8}, colorGrads.options()); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.scalar_type(), "visibility_debug_backward_kernel", ([&] { hipLaunchKernelGGL(( visibility_debug_backward_kernel<scalar_t, int64_t>) , dim3(dim3(batchSize, n_blocks, 1)), dim3(n_threads), 0, stream, batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, focalLength, mergeThres, considerZ, verboseLogIdx, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>(), // BxNx1 debugTensor.data<scalar_t>()); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); output.push_back(debugTensor); hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { printf("compute_visiblity_maps_cuda kernel failed: %s\n", hipGetErrorString(err)); exit(-1); } return output; } /* use gaussian filter's gradient, also */ template <typename scalar_t, typename indice_t> __global__ void visibility_reference_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const scalar_t gamma, const bool considerZ, const indice_t verboseLogIdx, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 const scalar_t *__restrict__ Ms, // BxNx2x2 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz, // BxNx1 gradients for z scalar_t *__restrict__ debugTensor // BxHxWx8 log gradients for verboseLogIdx ) { // const scalar_t mergeT = 
scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t curPointIdx = b * PN + p; const bool logPoint = (curPointIdx == verboseLogIdx); // skip point (gradient=0) if mask == 1 (i.e. point is good) // scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); // scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); // scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]); // scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]); const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t *curMs = Ms + curPointIdx * 4; const scalar_t rhov = rhoValues[curPointIdx]; const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t dL = 0; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; dL += curColorGrad[c]; // deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value // [dIdx;dIdy] = Ga*exp(-dTMd)(-2Md) // dIdz = Ga*exp(-dTMd)(-2Md) const scalar_t dx = (scalar_t(j) - curProjValues[0]); const scalar_t dy = (scalar_t(i) - curProjValues[1]); const scalar_t m11 = curMs[0]; const scalar_t m12 = curMs[1]; const scalar_t m21 = curMs[2]; const scalar_t m22 = curMs[3]; didxv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m11 * dx + m12 * dy); didyv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m21 * dx + m22 * dy); didzv = 0; if (curPointDepth - newDepth > mergeT) { scalar_t dz = newDepth - curPointDepth; didzv = dz / gamma * exp(-dz * dz / gamma); } if (logPoint) { debugTensor[curPixelIdx * 8] = didxv; debugTensor[curPixelIdx * 8 + 1] = didyv; debugTensor[curPixelIdx * 8 + 2] = didzv; } didxv = didxv * dL; didyv = didyv * dL; didzv = didzv * dL; } } // pixel inside splat else { // is the current point shown? if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; dL += curColorGrad[c]; // deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0) { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t m11 = curMs[0]; scalar_t m12 = curMs[1]; scalar_t m21 = curMs[2]; scalar_t m22 = curMs[3]; didxv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m11 * dx + m12 * dy); didyv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m21 * dx + m22 * dy); didzv = 0; if (logPoint) { debugTensor[curPixelIdx * 8] = didxv; debugTensor[curPixelIdx * 8 + 1] = didyv; debugTensor[curPixelIdx * 8 + 2] = didzv; } didxv = didxv * dL; didyv = didyv * dL; didzv = didzv * dL; } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; /* deltaI += (newColors[c] - curPixelValues[c]); */ dL += curColorGrad[c]; } if (dldI < 0.0) { // if (curPointDepth - newDepth > mergeT) // { scalar_t dz = newDepth - curPointDepth; didzv = 2 * dz / gamma * exp(-dz * dz / gamma); // } } if (logPoint) { debugTensor[curPixelIdx * 8] = didxv; debugTensor[curPixelIdx * 8 + 1] = didyv; debugTensor[curPixelIdx * 8 + 2] = didzv; } didxv = didxv * dL; didyv = didyv * dL; didzv = didzv * dL; } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; 
if (logPoint) { // debugTensor[curPixelIdx * 8] = didxv; // debugTensor[curPixelIdx * 8 + 1] = didyv; // debugTensor[curPixelIdx * 8 + 2] = didzv; debugTensor[curPixelIdx * 8 + 3] = dldI; for (size_t c = 0; c < WDim; c++) { debugTensor[curPixelIdx * 8 + 4 + c] = newColors[c]; } debugTensor[curPixelIdx * 8 + 7] = newDepth; } } // imWidth } // imHeight } // point } // batch } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_reference_backward_cuda(const double focalLength, const double mergeThres, const double gamma, const bool considerZ, const int localHeight, const int localWidth, const int logIdx, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, // BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 const at::Tensor &Ms, // BxNx2x2 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); const int projDim = projPoints.size(2); CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); const int64_t verboseLogIdx = int64_t(logIdx); // initialize with zeros dIdp.zero_(); dIdz.zero_(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // dIdpMap = outputs[:, :, :, :2] // dIdzMap = outputs[:, :, :, 2] // dldI = outputs[:, :, :, 3] // newColor = outputs[:, :, :, 4:7] // newDepth = outputs[:, :, :, 7] at::Tensor debugTensor = at::zeros({batchSize, imgHeight, imgWidth, 8}, colorGrads.options()); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.scalar_type(), "visibility_reference_backward_kernel", ([&] { hipLaunchKernelGGL(( visibility_reference_backward_kernel<scalar_t, int64_t>) , dim3(dim3(batchSize, n_blocks, 1)), dim3(n_threads), 0, stream, batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, scalar_t(focalLength), scalar_t(mergeThres), scalar_t(gamma), considerZ, verboseLogIdx, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 Ms.data<scalar_t>(), // BxNx2x2 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>(), // BxNx1 debugTensor.data<scalar_t>()); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); output.push_back(debugTensor); 
hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { printf("compute_visiblity_maps_cuda kernel failed: %s\n", hipGetErrorString(err)); exit(-1); } return output; }
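/*
 * A minimal stand-alone sketch (NOT part of this file) of the launch/indexing
 * pattern shared by the three backward wrappers above: the grid is
 * dim3(batchSize, n_blocks, 1), and the kernel strides over batches on
 * gridDim.x and over points on blockDim.x * gridDim.y. The kernel name, the
 * sizes, and the fixed 256-thread block (standing in for opt_n_threads) are
 * illustrative assumptions; the payload is a placeholder.
 */
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cstdio>

__global__ void batch_point_stride(const int batchSize, const int PN,
                                   float *__restrict__ out /* B x PN */) {
  // gridDim.x strides over batches...
  for (int b = blockIdx.x; b < batchSize; b += gridDim.x) {
    // ...and blockDim.x * gridDim.y strides over points
    for (int p = threadIdx.x + blockDim.x * blockIdx.y; p < PN;
         p += blockDim.x * gridDim.y) {
      out[b * PN + p] = float(p); // placeholder for per-point gradient work
    }
  }
}

int main() {
  const int batchSize = 2, PN = 10000; // illustrative sizes
  float *d_out = nullptr;
  hipMalloc((void **)&d_out, sizeof(float) * batchSize * PN);
  const unsigned n_threads = 256; // stand-in for opt_n_threads(PN)
  const unsigned n_blocks =
      std::min(32u, (unsigned(PN * batchSize) + n_threads - 1) / n_threads);
  hipLaunchKernelGGL(batch_point_stride, dim3(batchSize, n_blocks, 1),
                     dim3(n_threads), 0, 0, batchSize, PN, d_out);
  hipError_t err = hipDeviceSynchronize();
  if (err != hipSuccess)
    printf("batch_point_stride kernel failed: %s\n", hipGetErrorString(err));
  hipFree(d_out);
  return 0;
}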
1a4022cb3424af02f787094f98b79b3a074e8746.cu
#include "cuda_utils.h" #include "macros.hpp" #include <ATen/ExpandUtils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <c10/core/ScalarType.h> #include <stdio.h> #include <torch/extension.h> /* return the indice of current point in the idxList -1 outside >= 0 inside */ template <typename indice_t> __device__ void is_inside(const int topK, const indice_t *__restrict__ idxList, const indice_t curr_Idx, int *curK) { for (size_t i = 0; i < topK; i++) { // a pixel is inside the splat if idxList contains point index if (idxList[i] == curr_Idx) { *curK = i; return; } // a pixel definitely isn't inside a splat if it's not occupied by any point if (idxList[i] == -1) { *curK = -1; return; } } *curK = -1; return; } /* compute pixel color after removing a point from a merged pixel */ // TODO curPointList probably no necessary, since rhoList and WsList will be // zero at curPointList[k] == -1 template <typename scalar_t, typename indice_t> __device__ void after_removal(const int numColors, const int topK, const int curK, const scalar_t depthThres, const scalar_t *depthList, const indice_t *curPointList, // topK const uint8_t *curIsBehind, // topK const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *curPixel, // numColors scalar_t *newColors, // numColors scalar_t *newDepth) { // initialize color with 0.0 for (size_t c = 0; c < numColors; c++) { newColors[c] = 0.0; } // initialize depth with the farthest so far *newDepth = depthList[topK - 1]; scalar_t sumRho = 0.0; int numVisible = 0; for (size_t k = 0; k < topK; k++) { if (curIsBehind[k] == 0) ++numVisible; } // if it's the only visible point, then removing it will reveal the // color below assert(numVisible >= 0); if (numVisible == 1) { sumRho = 0.0; // CHECK: should be the second? 
scalar_t curDepth = depthList[1]; { size_t k = curK + 1; while (k < topK) { // as soon as idxList is -1 or depth > currentDepth+threshold // stop accumulating colors if (curPointList[k] == -1) { break; } if ((depthList[k] - curDepth) > depthThres) { break; } for (size_t c = 0; c < numColors; c++) { newColors[c] += wsList[k * numColors + c] * rhoList[k]; } sumRho += rhoList[k]; if (depthList[k] < *newDepth) { *newDepth = depthList[k]; } ++k; } } if (sumRho == 0) sumRho = 1.0; for (size_t c = 0; c < numColors; c++) { newColors[c] /= sumRho; } return; } // not the only point visible: // removing current point involves reweighting rhos for (size_t k = 0; k < numVisible; k++) { if (k == curK) { continue; } for (size_t c = 0; c < numColors; c++) { newColors[c] += wsList[k * numColors + c] * rhoList[k]; } sumRho += rhoList[k]; if (depthList[k] < *newDepth) { *newDepth = depthList[k]; } } if (sumRho == 0) sumRho = 1.0; for (size_t c = 0; c < numColors; c++) { newColors[c] /= sumRho; } assert(sumRho > 0); return; } /* compute pixel color after moving a point to a merged pixel */ template <typename scalar_t> __device__ void after_addition(const int numColors, const int topK, const scalar_t rho, const scalar_t *ws, const scalar_t pointDepth, const scalar_t depthThres, const scalar_t *depthList, const uint8_t *curIsBehind, // topK const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *curPixel, // numColors scalar_t *newColors, // numColors scalar_t *newDepth) { scalar_t sumRho = rho; for (size_t k = 0; k < topK; k++) { if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth break; } sumRho += rhoList[k]; } if (sumRho == 0) sumRho = 1.0; for (size_t c = 0; c < numColors; c++) { newColors[c] = rho / sumRho * ws[c]; } for (size_t k = 0; k < topK; k++) { for (size_t c = 0; c < numColors; c++) { if (curIsBehind[k] > 0 || (depthList[k] - depthThres) > pointDepth) { // || (depthList[k] - depthThres) > pointDepth break; } newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c]; } } *newDepth = min(depthList[0], pointDepth); } /* compute pixel color after moving a point closer to the screen */ template <typename scalar_t> __device__ void after_drawing_closer(const int numColors, const int topK, const int curK, const scalar_t *wsList, // topKx3 const scalar_t *rhoList, // topKx1 const scalar_t *depthList, // topK const uint8_t *isBehind, // topK scalar_t *newColors, scalar_t *newDepth) { scalar_t curRho = rhoList[curK]; const scalar_t *curW = wsList + curK * numColors; scalar_t pointDepth = depthList[curK]; scalar_t sumRho = curRho; for (size_t k = 0; k < topK; k++) { if (isBehind[k] > 0) { break; } sumRho += rhoList[k]; } // should at least have curRho assert(sumRho > 0); for (size_t c = 0; c < numColors; c++) { newColors[c] = curRho / sumRho * curW[c]; } for (size_t k = 0; k < topK; k++) { for (size_t c = 0; c < numColors; c++) { if (isBehind[k] > 0) { break; } newColors[c] += rhoList[k] / sumRho * wsList[k * numColors + c]; } } *newDepth = min(depthList[0], pointDepth); } template <typename scalar_t> __device__ scalar_t eps_guard(scalar_t v) { // const scalar_t eps = 0.01; const scalar_t eps = 1e-5; if (v < 0) { return v - eps; } if (v >= 0) { return v + eps; } return v; } /* */ template <typename scalar_t, typename indice_t> __global__ void visibility_debug_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, 
const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const bool considerZ, const indice_t verboseLogIdx, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz, // BxNx1 gradients for z scalar_t *__restrict__ debugTensor // BxHxWx8 log gradients for verboseLogIdx ) { // const scalar_t mergeT = scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t curPointIdx = b * PN + p; const bool logPoint = (curPointIdx == verboseLogIdx); // skip point (gradient=0) if mask == 1 (i.e. point is good) scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); // scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]); // scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]); const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t rhov = rhoValues[curPointIdx]; const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t deltaI = 0; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value if (curPointDepth - newDepth > mergeT) { if (!considerZ) { continue; } scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL / imgWidth * 2 * curPointDepth; scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL / imgHeight * 2 * curPointDepth; assert(newDepth < curPointDepth); scalar_t dz_3d = newDepth - curPointDepth; scalar_t distance2_3d = eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d); scalar_t distance2 = eps_guard(dx * dx + dy * dy); didzv = dldI / distance2_3d * dz_3d; // should rescale to screen space didxv = dldI / distance2 * dx; didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); if (logPoint) { debugTensor[curPixelIdx * 8] = deltaI / distance2 * dx; debugTensor[curPixelIdx * 8 + 1] = deltaI / distance2 * dy; debugTensor[curPixelIdx * 8 + 2] = deltaI / distance2_3d * dz_3d; } } // don't need to change z else { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance2 = eps_guard(dx * dx + dy * dy); // dIdx didxv = dldI / distance2 * dx; // dIdy didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); if (logPoint) { debugTensor[curPixelIdx * 8] = deltaI / distance2 * dx; debugTensor[curPixelIdx * 8 + 1] = deltaI / distance2 * dy; debugTensor[curPixelIdx * 8 + 2] = 0; } } } } // pixel inside splat else { // is the current point shown? 
if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0) { // dIdp = (dIdp+) + (dIdp-) scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy)); scalar_t rx = curProjValues[0] - xmin; scalar_t ry = curProjValues[1] - ymin; assert(rx > 0); assert(ry > 0); scalar_t r = max(rx, ry); didxv = dldI * dx / eps_guard((r + distance) * distance) + dldI * dx / eps_guard((distance - r) * distance); didyv = dldI * dy / eps_guard((r + distance) * distance) + dldI * dy / eps_guard((distance - r) * distance); assert(!isnan(didxv)); assert(!isnan(didyv)); if (logPoint) { debugTensor[curPixelIdx * 8] = deltaI * dx / eps_guard((r + distance) * distance) + deltaI * dx / eps_guard((distance - r) * distance); debugTensor[curPixelIdx * 8 + 1] = deltaI * dy / eps_guard((r + distance) * distance) + deltaI * dy / eps_guard((distance - r) * distance); debugTensor[curPixelIdx * 8 + 2] = 0; } } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0.0) { didzv = dldI / eps_guard(newDepth - curPointDepth); } if (logPoint) { debugTensor[curPixelIdx * 8] = 0; debugTensor[curPixelIdx * 8 + 1] = 0; debugTensor[curPixelIdx * 8 + 2] = deltaI / eps_guard(newDepth - curPointDepth); } } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; if (logPoint) { // debugTensor[curPixelIdx * 8] = didxv; // debugTensor[curPixelIdx * 8 + 1] = didyv; // debugTensor[curPixelIdx * 8 + 2] = didzv; debugTensor[curPixelIdx * 8 + 3] = dldI; for (size_t c = 0; c < WDim; c++) { debugTensor[curPixelIdx * 8 + 4 + c] = newColors[c]; } debugTensor[curPixelIdx * 8 + 7] = newDepth; } } // imWidth } // imHeight } // point } // batch } /* */ template <typename scalar_t, typename indice_t> __global__ void visibility_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const bool considerZ, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz) // BxNx1 gradients for z { // const scalar_t 
mergeT = scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t curPointIdx = b * PN + p; const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t rhov = rhoValues[curPointIdx]; scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value if (curPointDepth - newDepth > mergeT) { if (!considerZ) { continue; } scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t dx_3d = (scalar_t(j) - curProjValues[0]) / focalL / imgWidth * 2 * curPointDepth; scalar_t dy_3d = (scalar_t(i) - curProjValues[1]) / focalL / imgHeight * 2 * curPointDepth; assert(newDepth < curPointDepth); scalar_t dz_3d = newDepth - curPointDepth; scalar_t distance2_3d = eps_guard(dx_3d * dx_3d + dy_3d * dy_3d + dz_3d * dz_3d); scalar_t distance2 = eps_guard(dx * dx + dy * dy); didzv = dldI / distance2_3d * dz_3d; // should rescale to screen space didxv = dldI / distance2 * dx; didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); } // don't need to change z else { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance2 = eps_guard(dx * dx + dy * dy); // dIdx didxv = dldI / distance2 * dx; // dIdy didyv = dldI / distance2 * dy; assert(!isnan(didxv)); assert(!isnan(didyv)); } } } // pixel inside splat else { // is the current point shown? if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0) { // dIdp = (dIdp+) + (dIdp-) scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t distance = sqrt(eps_guard(dx * dx + dy * dy)); scalar_t rx = curProjValues[0] - xmin; scalar_t ry = curProjValues[1] - ymin; assert(rx > 0); assert(ry > 0); scalar_t r = max(rx, ry); didxv = dldI * dx / eps_guard((r + distance) * distance) + dldI * dx / eps_guard((distance - r) * distance); didyv = dldI * dy / eps_guard((r + distance) * distance) + dldI * dy / eps_guard((distance - r) * distance); assert(!isnan(didxv)); assert(!isnan(didyv)); } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; } if (dldI < 0.0) { didzv = dldI / eps_guard(newDepth - curPointDepth); } } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; } // imWidth } // imHeight } // point } // batch } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_backward_cuda(const double focalLength, const double mergeThres, const bool considerZ, const int localHeight, const int localWidth, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, 
// BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); const int projDim = projPoints.size(2); CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); // initialize with zeros dIdp.zero_(); dIdz.zero_(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.scalar_type(), "visibility_backward_kernel", ([&] { visibility_backward_kernel<scalar_t, int64_t> <<<dim3(batchSize, n_blocks, 1), n_threads, 0, stream>>>( batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, focalLength, mergeThres, considerZ, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>() // BxNx1 ); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { printf("compute_visiblity_maps_cuda kernel failed: %s\n", cudaGetErrorString(err)); exit(-1); } return output; } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_debug_backward_cuda(const double focalLength, const double mergeThres, const bool considerZ, const int localHeight, const int localWidth, const int logIdx, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, // BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); const int projDim = projPoints.size(2); 
CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); const int64_t verboseLogIdx = int64_t(logIdx); // initialize with zeros dIdp.zero_(); dIdz.zero_(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // dIdpMap = outputs[:, :, :, :2] // dIdzMap = outputs[:, :, :, 2] // dldI = outputs[:, :, :, 3] // newColor = outputs[:, :, :, 4:7] // newDepth = outputs[:, :, :, 7] at::Tensor debugTensor = at::zeros({batchSize, imgHeight, imgWidth, 8}, colorGrads.options()); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.scalar_type(), "visibility_debug_backward_kernel", ([&] { visibility_debug_backward_kernel<scalar_t, int64_t> <<<dim3(batchSize, n_blocks, 1), n_threads, 0, stream>>>( batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, focalLength, mergeThres, considerZ, verboseLogIdx, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>(), // BxNx1 debugTensor.data<scalar_t>()); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); output.push_back(debugTensor); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { printf("compute_visiblity_maps_cuda kernel failed: %s\n", cudaGetErrorString(err)); exit(-1); } return output; } /* use gaussian filter's gradient, also */ template <typename scalar_t, typename indice_t> __global__ void visibility_reference_backward_kernel( const int batchSize, const int imgHeight, const int imgWidth, const int localHeight, const int localWidth, const int topK, const int PN, const int projDim, const int WDim, const scalar_t focalL, const scalar_t mergeT, const scalar_t gamma, const bool considerZ, const indice_t verboseLogIdx, const scalar_t *__restrict__ colorGrads, // BxHxWx3 gradient from output const indice_t *__restrict__ pointIdxMap, // BxHxWxtopK const scalar_t *__restrict__ rhoMap, // BxHxWxtopK const scalar_t *__restrict__ wsMap, // BxHxWxtopKx3 const scalar_t *__restrict__ depthMap, // BxHxWxtopK const uint8_t *__restrict__ isBehind, // BxHxWxtopK const scalar_t *__restrict__ pixelValues, // BxHxWx3 const indice_t *__restrict__ boundingBoxes, // BxNx4 xmin ymin xmax ymax const scalar_t *__restrict__ projPoints, // BxNx[2or3], xy1 const scalar_t *__restrict__ pointColors, // BxNx3 const scalar_t *__restrict__ depthValues, // BxNx1 const scalar_t *__restrict__ rhoValues, // BxNx1 const scalar_t *__restrict__ Ms, // BxNx2x2 scalar_t *__restrict__ dIdp, // BxNx2 gradients for screenX and screenY scalar_t *__restrict__ dIdz, // BxNx1 gradients for z scalar_t *__restrict__ debugTensor // BxHxWx8 log gradients for verboseLogIdx ) { // const scalar_t mergeT = scalar_t(mergeThres); // const scalar_t focalL = scalar_t(focalLength); const int numPixels = 
imgHeight * imgWidth; // loop all points for (int b = blockIdx.x; b < batchSize; b += gridDim.x) { for (indice_t p = threadIdx.x + blockDim.x * blockIdx.y; p < PN; p += blockDim.x * gridDim.y) { const indice_t curPointIdx = b * PN + p; const bool logPoint = (curPointIdx == verboseLogIdx); // skip point (gradient=0) if mask == 1 (i.e. point is good) // scalar_t xmin = scalar_t(boundingBoxes[curPointIdx * 4]); // scalar_t ymin = scalar_t(boundingBoxes[curPointIdx * 4 + 1]); // scalar_t xmax = scalar_t(boundingBoxes[curPointIdx * 4 + 2]); // scalar_t ymax = scalar_t(boundingBoxes[curPointIdx * 4 + 3]); const scalar_t *curPointColor = pointColors + curPointIdx * WDim; const scalar_t *curProjValues = projPoints + curPointIdx * projDim; scalar_t *dIdx = dIdp + curPointIdx * projDim; scalar_t *dIdy = dIdp + curPointIdx * projDim + 1; scalar_t *curdIdz = dIdz + curPointIdx; const scalar_t *curMs = Ms + curPointIdx * 4; const scalar_t rhov = rhoValues[curPointIdx]; const int bH = min(max(0, int(curProjValues[1] - localHeight / 2)), imgHeight); const int eH = max(min(imgHeight, int(curProjValues[1] + localHeight / 2 + 1)), 0); const int bW = min(max(0, int(curProjValues[0] - localWidth / 2)), imgWidth); const int eW = max(min(imgWidth, int(curProjValues[0] + localWidth / 2 + 1)), 0); // loop all pixels for (size_t i = bH; i < eH; i++) { for (size_t j = bW; j < eW; j++) { const indice_t curPixelIdx = (b * numPixels + i * imgWidth + j); const scalar_t *curColorGrad = colorGrads + curPixelIdx * WDim; const scalar_t *curWs = wsMap + curPixelIdx * topK * WDim; const scalar_t *curRhos = rhoMap + curPixelIdx * topK; // const indice_t curClosest = pointIdxMap[curPixelIdx * topK]; // const indice_t curClosestIdx = b * PN + curClosest; const indice_t *curIdxList = pointIdxMap + curPixelIdx * topK; const scalar_t *curPixelValues = pixelValues + curPixelIdx * WDim; const scalar_t *curDepthList = depthMap + curPixelIdx * topK; // const scalar_t curClosestDepth = depthMap[curPixelIdx * topK]; const uint8_t *curIsBehind = isBehind + curPixelIdx * topK; const scalar_t curPointDepth = depthValues[curPointIdx]; // is this pixel inside the splat? 
int curK; is_inside(topK, curIdxList, curPointIdx, &curK); scalar_t didxv = 0.0; scalar_t didyv = 0.0; scalar_t didzv = 0.0; scalar_t dldI = 0.0; scalar_t newColors[10]; scalar_t dL = 0; scalar_t newDepth; // outside if (curK < 0) { after_addition(WDim, topK, rhov, curPointColor, curPointDepth, mergeT, curDepthList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; dL += curColorGrad[c]; // deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0.0) { // another point at pixel i,j is in front of the current point by // a threshold, need to change z, otherwise moving to that // direction won't change the color value // [dIdx;dIdy] = Ga*exp(-dTMd)(-2Md) // dIdz = Ga*exp(-dTMd)(-2Md) const scalar_t dx = (scalar_t(j) - curProjValues[0]); const scalar_t dy = (scalar_t(i) - curProjValues[1]); const scalar_t m11 = curMs[0]; const scalar_t m12 = curMs[1]; const scalar_t m21 = curMs[2]; const scalar_t m22 = curMs[3]; didxv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m11 * dx + m12 * dy); didyv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m21 * dx + m22 * dy); didzv = 0; if (curPointDepth - newDepth > mergeT) { scalar_t dz = newDepth - curPointDepth; didzv = dz / gamma * exp(-dz * dz / gamma); } if (logPoint) { debugTensor[curPixelIdx * 8] = didxv; debugTensor[curPixelIdx * 8 + 1] = didyv; debugTensor[curPixelIdx * 8 + 2] = didzv; } didxv = didxv * dL; didyv = didyv * dL; didzv = didzv * dL; } } // pixel inside splat else { // is the current point shown? if (curIsBehind[curK] < 1) { // dIdx dIdy and dIdz- after_removal(WDim, topK, curK, mergeT, curDepthList, curIdxList, curIsBehind, curWs, curRhos, curPixelValues, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; dL += curColorGrad[c]; // deltaI += (newColors[c] - curPixelValues[c]); } if (dldI < 0) { scalar_t dx = (scalar_t(j) - curProjValues[0]); scalar_t dy = (scalar_t(i) - curProjValues[1]); scalar_t m11 = curMs[0]; scalar_t m12 = curMs[1]; scalar_t m21 = curMs[2]; scalar_t m22 = curMs[3]; didxv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m11 * dx + m12 * dy); didyv = 2 / rhov * exp(-(m11 * dx * dx + (m12 + m21) * dx * dy + m22 * dy * dy)) * (m21 * dx + m22 * dy); didzv = 0; if (logPoint) { debugTensor[curPixelIdx * 8] = didxv; debugTensor[curPixelIdx * 8 + 1] = didyv; debugTensor[curPixelIdx * 8 + 2] = didzv; } didxv = didxv * dL; didyv = didyv * dL; didzv = didzv * dL; } } // endif (curRhos[curK] > 0) // point is not visible: else { if (!considerZ) continue; // this point is occluded by other points, moving closer will // change the color after_drawing_closer(WDim, topK, curK, curWs, curRhos, curDepthList, curIsBehind, newColors, &newDepth); for (size_t c = 0; c < WDim; c++) { dldI += (newColors[c] - curPixelValues[c]) * curColorGrad[c]; /* deltaI += (newColors[c] - curPixelValues[c]); */ dL += curColorGrad[c]; } if (dldI < 0.0) { // if (curPointDepth - newDepth > mergeT) // { scalar_t dz = newDepth - curPointDepth; didzv = 2 * dz / gamma * exp(-dz * dz / gamma); // } } if (logPoint) { debugTensor[curPixelIdx * 8] = didxv; debugTensor[curPixelIdx * 8 + 1] = didyv; debugTensor[curPixelIdx * 8 + 2] = didzv; } didxv = didxv * dL; didyv = didyv * dL; didzv = didzv * dL; } // endif on top } // endif inside (*curdIdz) += didzv; (*dIdx) += didxv; (*dIdy) += didyv; 
if (logPoint) { // debugTensor[curPixelIdx * 8] = didxv; // debugTensor[curPixelIdx * 8 + 1] = didyv; // debugTensor[curPixelIdx * 8 + 2] = didzv; debugTensor[curPixelIdx * 8 + 3] = dldI; for (size_t c = 0; c < WDim; c++) { debugTensor[curPixelIdx * 8 + 4 + c] = newColors[c]; } debugTensor[curPixelIdx * 8 + 7] = newDepth; } } // imWidth } // imHeight } // point } // batch } // dIdp BxNx2 dx dy, dIdz BxNx1 std::vector<at::Tensor> visibility_reference_backward_cuda(const double focalLength, const double mergeThres, const double gamma, const bool considerZ, const int localHeight, const int localWidth, const int logIdx, const at::Tensor &colorGrads, // BxHxWxWDim const at::Tensor &pointIdxMap, // BxHxWxtopK const at::Tensor &rhoMap, // BxHxWxtopK const at::Tensor &wsMap, // BxHxWxtopKxWDim const at::Tensor &depthMap, // BxHxWxtopK const at::Tensor &isBehind, // BxHxWxtopK const at::Tensor &pixelValues, // BxHxWxWDim const at::Tensor &boundingBoxes, // BxNx4 const at::Tensor &projPoints, // BxNx[2or3] const at::Tensor &pointColors, // BxNxWDim const at::Tensor &depthValues, // BxNx1 const at::Tensor &rhoValues, // BxNx1 const at::Tensor &Ms, // BxNx2x2 at::Tensor &dIdp, at::Tensor &dIdz) { const int batchSize = pointIdxMap.size(0); const int imgHeight = pointIdxMap.size(1); const int imgWidth = pointIdxMap.size(2); const int topK = pointIdxMap.size(3); const int PN = projPoints.size(1); const int WDim = pointColors.size(2); CHECK(projPoints.size(2) == 2 || projPoints.size(2) == 3); const int projDim = projPoints.size(2); CHECK_EQ(pointColors.size(1), PN); CHECK(colorGrads.size(-1) == wsMap.size(-1) && wsMap.size(-1) == pixelValues.size(-1) && pixelValues.size(-1) == pointColors.size(-1)); std::vector<at::Tensor> outputs; unsigned int n_threads, n_blocks; n_threads = opt_n_threads(PN); n_blocks = min(32, (PN * batchSize + n_threads - 1) / n_threads); const int64_t verboseLogIdx = int64_t(logIdx); // initialize with zeros dIdp.zero_(); dIdz.zero_(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // dIdpMap = outputs[:, :, :, :2] // dIdzMap = outputs[:, :, :, 2] // dldI = outputs[:, :, :, 3] // newColor = outputs[:, :, :, 4:7] // newDepth = outputs[:, :, :, 7] at::Tensor debugTensor = at::zeros({batchSize, imgHeight, imgWidth, 8}, colorGrads.options()); std::vector<at::Tensor> output; AT_DISPATCH_FLOATING_TYPES_AND_HALF( colorGrads.scalar_type(), "visibility_reference_backward_kernel", ([&] { visibility_reference_backward_kernel<scalar_t, int64_t> <<<dim3(batchSize, n_blocks, 1), n_threads, 0, stream>>>( batchSize, imgHeight, imgWidth, localHeight, localWidth, topK, PN, projDim, WDim, scalar_t(focalLength), scalar_t(mergeThres), scalar_t(gamma), considerZ, verboseLogIdx, colorGrads.data<scalar_t>(), // BxHxWx3 pointIdxMap.data<int64_t>(), // BxHxWxtopK rhoMap.data<scalar_t>(), // BxHxWxtopK wsMap.data<scalar_t>(), // BxHxWxtopKx3 depthMap.data<scalar_t>(), // BxHxWxtopK isBehind.data<uint8_t>(), // BxHxWxtopK pixelValues.data<scalar_t>(), // BxHxWx3 boundingBoxes.toType(pointIdxMap.scalar_type()) .data<int64_t>(), // BxNx4 xmin ymin xmax ymax projPoints.data<scalar_t>(), // BxNx[2or3], xy1 pointColors.data<scalar_t>(), // BxNx3 depthValues.data<scalar_t>(), // BxNx1 rhoValues.data<scalar_t>(), // BxNx1 Ms.data<scalar_t>(), // BxNx2x2 dIdp.data<scalar_t>(), // BxNx2 gradients for projX,Y dIdz.data<scalar_t>(), // BxNx1 debugTensor.data<scalar_t>()); // BxHxWx8 })); output.push_back(dIdp); output.push_back(dIdz); output.push_back(debugTensor); cudaError_t err = cudaDeviceSynchronize(); if 
(err != cudaSuccess) {
    printf("visibility_reference_backward_cuda kernel failed: %s\n",
           cudaGetErrorString(err));
    exit(-1);
  }
  return output;
}
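For reference, the screen-space gradient evaluated by visibility_reference_backward_kernel above can be written compactly. This is a reading of the kernel source itself, not an external specification: with \( d = (j - p_x,\; i - p_y)^{\top} \) the pixel offset from the projected point and \( M \) the per-point 2x2 matrix taken from Ms, the kernel computes

\[
\frac{\partial I}{\partial (x, y)} = \frac{2}{\rho}\, e^{-d^{\top} M d}\, M d,
\qquad
\frac{\partial I}{\partial z} = \frac{\Delta z}{\gamma}\, e^{-\Delta z^{2}/\gamma},
\quad \Delta z = z_{\text{new}} - z_{\text{point}},
\]

with an extra factor of 2 on the depth term in the occluded branch, and each component subsequently scaled by dL, the sum of the incoming color gradients at that pixel.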
0dabdb0d08c232088a1391ccdafafa512bfd0e3c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vector_log1p.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; REAL *y = NULL; hipMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vector_log1p), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vector_log1p), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vector_log1p), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0dabdb0d08c232088a1391ccdafafa512bfd0e3c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vector_log1p.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int n = 1; const REAL *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const int offset_x = 1; const int stride_x = 1; REAL *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); const int offset_y = 1; const int stride_y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vector_log1p<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vector_log1p<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vector_log1p<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
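The benchmarked kernel comes from the included vector_log1p.cu, which is not reproduced here. The sketch below is a minimal body consistent with the launch site above; only the parameter list is taken from the call, while the grid-stride loop and the offset/stride indexing semantics are assumptions, not the original source (REAL is presumed defined in vector_log1p.cu):

// Hypothetical sketch of the kernel in vector_log1p.cu -- the signature is
// inferred from the launch above; the body is an assumption.
__global__ void vector_log1p(const int n,
                             const REAL *x, const int offset_x, const int stride_x,
                             REAL *y, const int offset_y, const int stride_y) {
  // Grid-stride loop over the n logical elements.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x) {
    // log1p(v) = log(1 + v), numerically stable for small v; use log1pf if REAL is float.
    y[offset_y + i * stride_y] = log1p(x[offset_x + i * stride_x]);
  }
}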
b246c340db9a5723728b5133649f0f4e40d9e4f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************* * Copyright 2011-2012, * Marwan Abdellah: <abdellah.marwan@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation. * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. ********************************************************************/ #ifndef _MULTIPLY_1D_ARRAY_DEVICE_IMPL_CU_ #define _MULTIPLY_1D_ARRAY_DEVICE_IMPL_CU_ #include "cuGlobals.h" #include "Timers/Boost.h" #include "Kernels/Multiply_1D_Arrays_Kernel.cu" template <typename T> extern void cu_Multiply_1D_Arrays_Impl (dim3 cuBlock, dim3 cuGrid, T *devArrayInput_1, T* devArrayInput_2, T* devArrayOutput, int N, cuProfile* profile) { // Create CUDA timer cutCreateTimer(&(profile->kernelTime)); // Reset CUDA timer cutResetTimer(profile->kernelTime); // Start CUDA timer cutStartTimer(profile->kernelTime); // Execute the kernel hipLaunchKernelGGL(( Multiply_1D_Arrays_Kernel) , dim3(cuGrid), dim3(cuBlock) , 0, 0, devArrayInput_1, devArrayInput_2, devArrayOutput, N); // Stop CUDA timer cutStopTimer(profile->kernelTime); // Calculate kernel execution time profile->kernelDuration = cutGetTimerValue(profile->kernelTime); // Check successfull execution of the kernel profile->kernelExecErr = hipPeekAtLastError(); } template void cu_Multiply_1D_Arrays_Impl <char> (dim3 cuBlock, dim3 cuGrid, char *devArrayInput_1, char *devArrayInput_2, char* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <unsigned char> (dim3 cuBlock, dim3 cuGrid, unsigned char *devArrayInput_1, unsigned char *devArrayInput_2, unsigned char* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <int> (dim3 cuBlock, dim3 cuGrid, int *devArrayInput_1, int *devArrayInput_2, int* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <unsigned int> (dim3 cuBlock, dim3 cuGrid, unsigned int *devArrayInput_1, unsigned int *devArrayInput_2, unsigned int* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <float> (dim3 cuBlock, dim3 cuGrid, float *devArrayInput_1, float *devArrayInput_2, float* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <double> (dim3 cuBlock, dim3 cuGrid, double *devArrayInput_1, double *devArrayInput_2, double* devArrayOutput, int N, cuProfile* profile); #endif // _MULTIPLY_1D_ARRAY_DEVICE_IMPL_CU_
b246c340db9a5723728b5133649f0f4e40d9e4f3.cu
/********************************************************************* * Copyright © 2011-2012, * Marwan Abdellah: <abdellah.marwan@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation. * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. ********************************************************************/ #ifndef _MULTIPLY_1D_ARRAY_DEVICE_IMPL_CU_ #define _MULTIPLY_1D_ARRAY_DEVICE_IMPL_CU_ #include "cuGlobals.h" #include "Timers/Boost.h" #include "Kernels/Multiply_1D_Arrays_Kernel.cu" template <typename T> extern void cu_Multiply_1D_Arrays_Impl (dim3 cuBlock, dim3 cuGrid, T *devArrayInput_1, T* devArrayInput_2, T* devArrayOutput, int N, cuProfile* profile) { // Create CUDA timer cutCreateTimer(&(profile->kernelTime)); // Reset CUDA timer cutResetTimer(profile->kernelTime); // Start CUDA timer cutStartTimer(profile->kernelTime); // Execute the kernel Multiply_1D_Arrays_Kernel <<< cuGrid, cuBlock >>> (devArrayInput_1, devArrayInput_2, devArrayOutput, N); // Stop CUDA timer cutStopTimer(profile->kernelTime); // Calculate kernel execution time profile->kernelDuration = cutGetTimerValue(profile->kernelTime); // Check successfull execution of the kernel profile->kernelExecErr = cudaPeekAtLastError(); } template void cu_Multiply_1D_Arrays_Impl <char> (dim3 cuBlock, dim3 cuGrid, char *devArrayInput_1, char *devArrayInput_2, char* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <unsigned char> (dim3 cuBlock, dim3 cuGrid, unsigned char *devArrayInput_1, unsigned char *devArrayInput_2, unsigned char* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <int> (dim3 cuBlock, dim3 cuGrid, int *devArrayInput_1, int *devArrayInput_2, int* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <unsigned int> (dim3 cuBlock, dim3 cuGrid, unsigned int *devArrayInput_1, unsigned int *devArrayInput_2, unsigned int* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <float> (dim3 cuBlock, dim3 cuGrid, float *devArrayInput_1, float *devArrayInput_2, float* devArrayOutput, int N, cuProfile* profile); template void cu_Multiply_1D_Arrays_Impl <double> (dim3 cuBlock, dim3 cuGrid, double *devArrayInput_1, double *devArrayInput_2, double* devArrayOutput, int N, cuProfile* profile); #endif // _MULTIPLY_1D_ARRAY_DEVICE_IMPL_CU_
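The kernel body lives in Kernels/Multiply_1D_Arrays_Kernel.cu, which is not included above. A plausible sketch follows, inferred only from the wrapper's parameter list and the explicit template instantiations; the element-wise multiply and the bounds check are assumptions:

// Hypothetical sketch of Multiply_1D_Arrays_Kernel -- the real file is not
// shown; only the template signature is implied by the wrapper above.
template <typename T>
__global__ void Multiply_1D_Arrays_Kernel(T *devArrayInput_1,
                                          T *devArrayInput_2,
                                          T *devArrayOutput, int N) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N)
    devArrayOutput[idx] = devArrayInput_1[idx] * devArrayInput_2[idx];
}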
698a45b8ae8c107069958d4a9c0eb473ea6e63a9.hip
// !!! This is a file automatically generated by hipify!!! #include "trade.h" double randfrom(double min, double max) { double range = (max - min); double div = RAND_MAX / range; return min + (rand() / div); } #define SEEDSTR \ "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n" #define SEEDPARAMS void printSeed(Seed *seed) { printf(SEEDSTR, seed->a, seed->b, seed->c, seed->d, seed->e, seed->f, seed->g, seed->h, seed->i, seed->j, seed->k, seed->l, seed->m, seed->n, seed->o, seed->p); } Seed scanSeed(char *seedStr) { Seed seed; sscanf(seedStr, SEEDSTR, &seed.a, &seed.b, &seed.c, &seed.d, &seed.e, &seed.f, &seed.g, &seed.h, &seed.i, &seed.j, &seed.k, &seed.l, &seed.m, &seed.n, &seed.o, &seed.p); return seed; } DEVICE void printMinute2(Line *line, int cursor) { if (cursor != -1) { printf("~> %-6d | ", cursor + 2); } printf( "%ld OPEN: %-10.5lf HIGH: %-10.5lf LOW: %-10.5lf CLOSE: %-10.5lf " "AVG_C: %-10.5lf\n", line->time, line->open, line->high, line->low, line->close, line->avgCandle); } void createIndicators(Data *data) { #ifdef PLAY data->line = (Line *)malloc(sizeof(Line) * data->nbrMinutes); #endif #ifndef PLAY hipMallocManaged(&data->line, sizeof(Line) * data->nbrMinutes); #endif for (int i = 0; i < data->nbrMinutes; i++) { double avg = -1; if (i > 1405) { double totalCandleSize = 0; int nbrCandles = 0; for (int j = i - 150; j < i - 10; j++) { // &data->minutes[j]; nbrCandles += 1; totalCandleSize += fabs((data->minutes[j].open - data->minutes[j].close)); } avg = totalCandleSize / nbrCandles; } memcpy(&data->line[i], &data->minutes[i], sizeof(Minute)); data->line[i].avgCandle = avg; printMinute2(&data->line[i], i); } } Data loadMinutes(char *path) { Data data; int fd = open(path, O_RDONLY); struct stat buf; fstat(fd, &buf); off_t size = buf.st_size; #ifdef PLAY data.minutes = (Minute *)malloc(size); #endif #ifndef PLAY hipMallocManaged(&data.minutes, size); #endif int rd = read(fd, data.minutes, size); if (rd <= 0) { printf("ERROR LOAD FILE\n"); exit(0); } data.nbrMinutes = size / sizeof(Minute); createIndicators(&data); return data; } DEVICE void printMinute(Minute *minute, int cursor) { if (cursor != -1) { printf("~> %-6d | ", cursor + 2); } printf("%ld OPEN: %-10.5lf HIGH: %-10.5lf LOW: %-10.5lf CLOSE: %-10.5lf\n", minute->time, minute->open, minute->high, minute->low, minute->close); }
698a45b8ae8c107069958d4a9c0eb473ea6e63a9.cu
#include "trade.h" double randfrom(double min, double max) { double range = (max - min); double div = RAND_MAX / range; return min + (rand() / div); } #define SEEDSTR \ "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n" #define SEEDPARAMS void printSeed(Seed *seed) { printf(SEEDSTR, seed->a, seed->b, seed->c, seed->d, seed->e, seed->f, seed->g, seed->h, seed->i, seed->j, seed->k, seed->l, seed->m, seed->n, seed->o, seed->p); } Seed scanSeed(char *seedStr) { Seed seed; sscanf(seedStr, SEEDSTR, &seed.a, &seed.b, &seed.c, &seed.d, &seed.e, &seed.f, &seed.g, &seed.h, &seed.i, &seed.j, &seed.k, &seed.l, &seed.m, &seed.n, &seed.o, &seed.p); return seed; } DEVICE void printMinute2(Line *line, int cursor) { if (cursor != -1) { printf("~> %-6d | ", cursor + 2); } printf( "%ld OPEN: %-10.5lf HIGH: %-10.5lf LOW: %-10.5lf CLOSE: %-10.5lf " "AVG_C: %-10.5lf\n", line->time, line->open, line->high, line->low, line->close, line->avgCandle); } void createIndicators(Data *data) { #ifdef PLAY data->line = (Line *)malloc(sizeof(Line) * data->nbrMinutes); #endif #ifndef PLAY cudaMallocManaged(&data->line, sizeof(Line) * data->nbrMinutes); #endif for (int i = 0; i < data->nbrMinutes; i++) { double avg = -1; if (i > 1405) { double totalCandleSize = 0; int nbrCandles = 0; for (int j = i - 150; j < i - 10; j++) { // &data->minutes[j]; nbrCandles += 1; totalCandleSize += fabs((data->minutes[j].open - data->minutes[j].close)); } avg = totalCandleSize / nbrCandles; } memcpy(&data->line[i], &data->minutes[i], sizeof(Minute)); data->line[i].avgCandle = avg; printMinute2(&data->line[i], i); } } Data loadMinutes(char *path) { Data data; int fd = open(path, O_RDONLY); struct stat buf; fstat(fd, &buf); off_t size = buf.st_size; #ifdef PLAY data.minutes = (Minute *)malloc(size); #endif #ifndef PLAY cudaMallocManaged(&data.minutes, size); #endif int rd = read(fd, data.minutes, size); if (rd <= 0) { printf("ERROR LOAD FILE\n"); exit(0); } data.nbrMinutes = size / sizeof(Minute); createIndicators(&data); return data; } DEVICE void printMinute(Minute *minute, int cursor) { if (cursor != -1) { printf("~> %-6d | ", cursor + 2); } printf("%ld OPEN: %-10.5lf HIGH: %-10.5lf LOW: %-10.5lf CLOSE: %-10.5lf\n", minute->time, minute->open, minute->high, minute->low, minute->close); }
e611a9faa9739bdf65c347a5f73e11dcef0df640.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; double* __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); double* __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-8); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8); double t2_0=0.0f, t3_0=0.0f, t2_1=0.0f, t3_1=0.0f; double b2_0=0.0f, b3_0=0.0f, b2_1=0.0f, b3_1=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ double __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_8__ = (0.083000f * __temp_3__ + 
0.083000f * __temp_7__); double __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); double __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); double __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); double __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); double __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); double __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); double __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); double __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); double __temp_37__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); double __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); double __temp_43__ = (__temp_38__ + 0.083000f * t2_1); double __temp_48__ = (__temp_43__ + 0.083000f * t2_0); double __temp_53__ = (__temp_48__ + 0.083000f * b2_0); double __temp_58__ = (__temp_53__ + 0.083000f * b2_1); double __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ double __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); double __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); double __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); double __temp_22__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); double __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); double __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); double __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); double __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); double __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); double __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); double __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); double __temp_43__ = (__temp_38__ + 0.083000f * t3_1); double __temp_48__ = (__temp_43__ + 0.083000f * t3_0); double __temp_53__ = (__temp_48__ + 0.083000f * b3_0); double __temp_58__ = (__temp_53__ + 0.083000f * b3_1); double __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); 
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = __temp_63__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(double)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void j3d13pt(double * h_input, int L, int M, int N, double * __var_0__){ /* Host allocation Begin */ double * input; hipMalloc(&input,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input); } double * __var_1__; hipMalloc(&__var_1__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); double * __var_2__; hipMalloc(&__var_2__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 16; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__var_2__); } /*Host Free End*/
e611a9faa9739bdf65c347a5f73e11dcef0df640.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; double* __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); double* __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-8); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8); double t2_0=0.0f, t3_0=0.0f, t2_1=0.0f, t3_1=0.0f; double b2_0=0.0f, b3_0=0.0f, b2_1=0.0f, b3_1=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ double __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); double __temp_12__ = 
(__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); double __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); double __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); double __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); double __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); double __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); double __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); double __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); double __temp_37__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); double __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); double __temp_43__ = (__temp_38__ + 0.083000f * t2_1); double __temp_48__ = (__temp_43__ + 0.083000f * t2_0); double __temp_53__ = (__temp_48__ + 0.083000f * b2_0); double __temp_58__ = (__temp_53__ + 0.083000f * b2_1); double __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ double __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); double __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); double __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); double __temp_22__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); double __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); double __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); double __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); double __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); double __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); double __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); double __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); double __temp_43__ = (__temp_38__ + 0.083000f * t3_1); double __temp_48__ = (__temp_43__ + 0.083000f * t3_0); double __temp_53__ = (__temp_48__ + 0.083000f * b3_0); double __temp_58__ = (__temp_53__ + 0.083000f * b3_1); double __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); double __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = 
__temp_63__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(double)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void j3d13pt(double * h_input, int L, int M, int N, double * __var_0__){ /* Host allocation Begin */ double * input; cudaMalloc(&input,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input); } double * __var_1__; cudaMalloc(&__var_1__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); double * __var_2__; cudaMalloc(&__var_2__,sizeof(double)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 16; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! 
: __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__var_2__); } /*Host Free End*/
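Stripping away the tiling machinery, each stage of __kernel___forma_kernel__0__ applies the 13-point Jacobi stencil its name (j3d13pt) suggests: twelve axis-aligned neighbors at distances one and two, each weighted 0.083, minus 0.996 times the center value,

\[
u'_{i,j,k} = 0.083 \sum_{s \in \{\pm 1,\, \pm 2\}} \left( u_{i+s,j,k} + u_{i,j+s,k} + u_{i,j,k+s} \right) - 0.996\, u_{i,j,k}.
\]

The __tilevar_2__/__tilevar_3__ stages chain two such applications per kernel invocation, and the host launches the kernel twice (input to __var_2__, then __var_2__ to __var_1__), so four applications in total.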
8f8c6cc88692029f1204fcc71e7494027318e2a7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// version 0
// global memory only interleaved version
// include comments describing your approach
__global__ void histogram_global_kernel(unsigned int *input, unsigned int *bins,
                                        unsigned int num_elements,
                                        unsigned int num_bins) {
  // insert your code here
}

// version 1
// shared memory privatized version
// include comments describing your approach
__global__ void histogram_shared_kernel(unsigned int *input, unsigned int *bins,
                                        unsigned int num_elements,
                                        unsigned int num_bins) {
  // insert your code here
}

// version 2
// your method of optimization using shared memory
// include DETAILED comments describing your approach
__global__ void histogram_shared_accumulate_kernel(unsigned int *input,
                                                   unsigned int *bins,
                                                   unsigned int num_elements,
                                                   unsigned int num_bins) {
  // insert your code here
}

// clipping function
// resets bins that have value larger than 127 to 127.
// that is if bin[i]>127 then bin[i]=127
__global__ void convert_kernel(unsigned int *bins, unsigned int num_bins) {
  // insert your code here
}
8f8c6cc88692029f1204fcc71e7494027318e2a7.cu
// version 0
// global memory only interleaved version
// include comments describing your approach
__global__ void histogram_global_kernel(unsigned int *input, unsigned int *bins,
                                        unsigned int num_elements,
                                        unsigned int num_bins) {
  // insert your code here
}

// version 1
// shared memory privatized version
// include comments describing your approach
__global__ void histogram_shared_kernel(unsigned int *input, unsigned int *bins,
                                        unsigned int num_elements,
                                        unsigned int num_bins) {
  // insert your code here
}

// version 2
// your method of optimization using shared memory
// include DETAILED comments describing your approach
__global__ void histogram_shared_accumulate_kernel(unsigned int *input,
                                                   unsigned int *bins,
                                                   unsigned int num_elements,
                                                   unsigned int num_bins) {
  // insert your code here
}

// clipping function
// resets bins that have value larger than 127 to 127.
// that is if bin[i]>127 then bin[i]=127
__global__ void convert_kernel(unsigned int *bins, unsigned int num_bins) {
  // insert your code here
}
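The four kernels above are left as stubs ("insert your code here"). As one hedged reference point, a minimal version 0 could use grid-stride ("interleaved") reads with atomic increments on the global bins, and the clipping kernel follows directly from its comment. Both bodies below are assumptions, not the original solution; they use _sketch suffixes to avoid redefining the stubs:

// Hypothetical version 0: interleaved (grid-stride) global-memory histogram.
__global__ void histogram_global_kernel_sketch(unsigned int *input,
                                               unsigned int *bins,
                                               unsigned int num_elements,
                                               unsigned int num_bins) {
  unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < num_elements; i += stride)
    atomicAdd(&bins[input[i] % num_bins], 1u);  // serializes on bin collisions
}

// Clipping kernel, directly from the comment: bins above 127 are reset to 127.
__global__ void convert_kernel_sketch(unsigned int *bins, unsigned int num_bins) {
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_bins && bins[i] > 127) bins[i] = 127;
}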
931c051aebe7b85a727588eec6de1e711501fae1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "reference.h" __global__ void vanGenuchten( const double *__restrict__ Ksat, const double *__restrict__ psi, double *__restrict__ C, double *__restrict__ theta, double *__restrict__ K, const int size) { double Se, _theta, _psi, lambda, m, t; int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < size) { lambda = n - 1.0; m = lambda/n; // Compute the volumetric moisture content [eqn 21] _psi = psi[i] * 100.0; if ( _psi < 0.0 ) _theta = (theta_S - theta_R) / pow(1.0 + pow((alpha*(-_psi)),n), m) + theta_R; else _theta = theta_S; theta[i] = _theta; // Compute the effective saturation [eqn 2] Se = (_theta - theta_R)/(theta_S - theta_R); // Compute the hydraulic conductivity [eqn 8] t = 1.0 - pow(1.0-pow(Se,1.0/m), m); K[i] = Ksat[i] * sqrt(Se) * t * t; // Compute the specific moisture storage derivative of eqn (21). // So we have to calculate C = d(theta)/dh. Then the unit is converted into [1/m]. if (_psi < 0.0) C[i] = 100.0 * alpha * n * (1.0/n-1.0)*pow(alpha*abs(_psi), n-1.0) * (theta_R-theta_S) * pow(pow(alpha*abs(_psi), n)+1.0, 1.0/n-2.0); else C[i] = 0.0; } } int main(int argc, char* argv[]) { if (argc != 5) { printf("Usage: ./%s <dimX> <dimY> <dimZ> <repeat>\n", argv[0]); return 1; } const int dimX = atoi(argv[1]); const int dimY = atoi(argv[2]); const int dimZ = atoi(argv[3]); const int repeat = atoi(argv[4]); const int size = dimX * dimY * dimZ; const int size_byte = size * sizeof(double); double *Ksat, *psi, *C, *theta, *K; double *C_ref, *theta_ref, *K_ref; Ksat = new double[size]; psi = new double[size]; C = new double[size]; theta = new double[size]; K = new double[size]; C_ref = new double[size]; theta_ref = new double[size]; K_ref = new double[size]; // arbitrary numbers for (int i = 0; i < size; i++) { Ksat[i] = 1e-6 + (1.0 - 1e-6) * i / size; psi[i] = -100.0 + 101.0 * i / size; } // for verification reference(Ksat, psi, C_ref, theta_ref, K_ref, size); double *d_Ksat, *d_psi, *d_C, *d_theta, *d_K; hipMalloc((void**)&d_Ksat, size_byte); hipMalloc((void**)&d_psi, size_byte); hipMalloc((void**)&d_C, size_byte); hipMalloc((void**)&d_theta, size_byte); hipMalloc((void**)&d_K, size_byte); hipMemcpy(d_Ksat, Ksat, size_byte, hipMemcpyHostToDevice); hipMemcpy(d_psi, psi, size_byte, hipMemcpyHostToDevice); dim3 grids ((size+255)/256); dim3 blocks (256); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( vanGenuchten) , dim3(grids), dim3(blocks) , 0, 0, d_Ksat, d_psi, d_C, d_theta, d_K, size); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(C, d_C, size_byte, hipMemcpyDeviceToHost); hipMemcpy(theta, d_theta, size_byte, hipMemcpyDeviceToHost); hipMemcpy(K, d_K, size_byte, hipMemcpyDeviceToHost); bool ok = true; for (int i = 0; i < size; i++) { if (fabs(C[i] - C_ref[i]) > 1e-3 || fabs(theta[i] - theta_ref[i]) > 1e-3 || fabs(K[i] - K_ref[i]) > 1e-3) { ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); hipFree(d_Ksat); hipFree(d_psi); hipFree(d_C); hipFree(d_theta); hipFree(d_K); // arrays allocated with new[] must be released with delete[] delete[] Ksat; delete[] psi; delete[] C; delete[] theta; delete[] K; delete[] C_ref; delete[] theta_ref; delete[] K_ref; return 0; }
931c051aebe7b85a727588eec6de1e711501fae1.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <cuda.h> #include "reference.h" __global__ void vanGenuchten( const double *__restrict__ Ksat, const double *__restrict__ psi, double *__restrict__ C, double *__restrict__ theta, double *__restrict__ K, const int size) { double Se, _theta, _psi, lambda, m, t; int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < size) { lambda = n - 1.0; m = lambda/n; // Compute the volumetric moisture content [eqn 21] _psi = psi[i] * 100.0; if ( _psi < 0.0 ) _theta = (theta_S - theta_R) / pow(1.0 + pow((alpha*(-_psi)),n), m) + theta_R; else _theta = theta_S; theta[i] = _theta; // Compute the effective saturation [eqn 2] Se = (_theta - theta_R)/(theta_S - theta_R); // Compute the hydraulic conductivity [eqn 8] t = 1.0 - pow(1.0-pow(Se,1.0/m), m); K[i] = Ksat[i] * sqrt(Se) * t * t; // Compute the specific moisture storage derivative of eqn (21). // So we have to calculate C = d(theta)/dh. Then the unit is converted into [1/m]. if (_psi < 0.0) C[i] = 100.0 * alpha * n * (1.0/n-1.0)*pow(alpha*abs(_psi), n-1.0) * (theta_R-theta_S) * pow(pow(alpha*abs(_psi), n)+1.0, 1.0/n-2.0); else C[i] = 0.0; } } int main(int argc, char* argv[]) { if (argc != 5) { printf("Usage: ./%s <dimX> <dimY> <dimZ> <repeat>\n", argv[0]); return 1; } const int dimX = atoi(argv[1]); const int dimY = atoi(argv[2]); const int dimZ = atoi(argv[3]); const int repeat = atoi(argv[4]); const int size = dimX * dimY * dimZ; const int size_byte = size * sizeof(double); double *Ksat, *psi, *C, *theta, *K; double *C_ref, *theta_ref, *K_ref; Ksat = new double[size]; psi = new double[size]; C = new double[size]; theta = new double[size]; K = new double[size]; C_ref = new double[size]; theta_ref = new double[size]; K_ref = new double[size]; // arbitrary numbers for (int i = 0; i < size; i++) { Ksat[i] = 1e-6 + (1.0 - 1e-6) * i / size; psi[i] = -100.0 + 101.0 * i / size; } // for verification reference(Ksat, psi, C_ref, theta_ref, K_ref, size); double *d_Ksat, *d_psi, *d_C, *d_theta, *d_K; cudaMalloc((void**)&d_Ksat, size_byte); cudaMalloc((void**)&d_psi, size_byte); cudaMalloc((void**)&d_C, size_byte); cudaMalloc((void**)&d_theta, size_byte); cudaMalloc((void**)&d_K, size_byte); cudaMemcpy(d_Ksat, Ksat, size_byte, cudaMemcpyHostToDevice); cudaMemcpy(d_psi, psi, size_byte, cudaMemcpyHostToDevice); dim3 grids ((size+255)/256); dim3 blocks (256); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) vanGenuchten <<< grids, blocks >>> (d_Ksat, d_psi, d_C, d_theta, d_K, size); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat); cudaMemcpy(C, d_C, size_byte, cudaMemcpyDeviceToHost); cudaMemcpy(theta, d_theta, size_byte, cudaMemcpyDeviceToHost); cudaMemcpy(K, d_K, size_byte, cudaMemcpyDeviceToHost); bool ok = true; for (int i = 0; i < size; i++) { if (fabs(C[i] - C_ref[i]) > 1e-3 || fabs(theta[i] - theta_ref[i]) > 1e-3 || fabs(K[i] - K_ref[i]) > 1e-3) { ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); cudaFree(d_Ksat); cudaFree(d_psi); cudaFree(d_C); cudaFree(d_theta); cudaFree(d_K); // arrays allocated with new[] must be released with delete[] delete[] Ksat; delete[] psi; delete[] C; delete[] theta; delete[] K; delete[] C_ref; delete[] theta_ref; delete[] K_ref; return 0; }
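/* reference.h (not shown) supplies the Van Genuchten constants n, alpha, theta_R, theta_S and the host-side reference() used for the PASS/FAIL check. A plausible host sketch mirroring the kernel arithmetic above; the constant values here are illustrative assumptions only. */
#include <cmath>

namespace sketch {
const double n = 1.89, alpha = 0.075, theta_R = 0.102, theta_S = 0.368;

void reference(const double *Ksat, const double *psi,
               double *C, double *theta, double *K, int size) {
  const double m = (n - 1.0) / n;
  for (int i = 0; i < size; i++) {
    double p = psi[i] * 100.0;  // same unit conversion as the kernel
    double th = (p < 0.0)       // volumetric moisture content [eqn 21]
        ? (theta_S - theta_R) / std::pow(1.0 + std::pow(alpha * (-p), n), m) + theta_R
        : theta_S;
    theta[i] = th;
    double Se = (th - theta_R) / (theta_S - theta_R);       // effective saturation
    double t = 1.0 - std::pow(1.0 - std::pow(Se, 1.0 / m), m);
    K[i] = Ksat[i] * std::sqrt(Se) * t * t;                 // hydraulic conductivity
    C[i] = (p < 0.0)                                        // specific moisture storage
        ? 100.0 * alpha * n * (1.0 / n - 1.0) * std::pow(alpha * std::fabs(p), n - 1.0)
            * (theta_R - theta_S)
            * std::pow(std::pow(alpha * std::fabs(p), n) + 1.0, 1.0 / n - 2.0)
        : 0.0;
  }
}
}  // namespace sketch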
88270c72f16e373f0ebb6bc64725c5c75745d1f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#define _NTHREAD 512 //#define _NBLOCK 65535 #include<cuda.h> __global__ void _AFFINE_KERNEL(int* ,int ,int ,int ,int ,int ,int ); #include<stdio.h> #include<stdlib.h> #include<string.h> #include<time.h> int main(int argc, char** argv) { int N = 1000; int N_NODES = 100; int data = 1; int _NTHREAD = 1, _NBLOCK = 1; char* readfile, *outfile; if(argc>1) _NTHREAD = atoi(argv[1]); if(argc>2) _NBLOCK = atoi(argv[2]); if(argc>3) data = atoi(argv[3]) + 1; if(argc>4) readfile = argv[4]; int i,j; //srand(time(NULL)); FILE* f; f = fopen(readfile, "r"); j=0; char c; while(1){ c = fgetc(f); if(c=='\n') { j++; c = fgetc(f); if(c!='%') break; } } fscanf(f, "%d", &N_NODES); //printf("---------%d ----------",N_NODES); fscanf(f, "%d", &N_NODES); fscanf(f, "%d", &N); if(2*N<_NTHREAD*_NBLOCK) { printf("%d\n",_NTHREAD*_NBLOCK); fclose(f); return 0; } struct timespec start, end, mid_start, mid_end; double runTime, pre_time, post_time, computeTime; outfile = (char*)malloc(sizeof(char)*(strlen(readfile)+10)); strcpy(outfile, readfile); //strcat(outfile, "in2_"); //strcat(outfile, readfile); strcat(outfile, ".data"); FILE* fp; fp = fopen(outfile, "a"); int a[N],x; for (i = 0; i < N; i++) { fscanf(f, "%d", &a[i]); fscanf(f, "%d", &x); } clock_gettime(CLOCK_MONOTONIC, &start); int _SZ_a_1 = N; int *_DEV_a; hipMalloc((void**) &_DEV_a, sizeof(int)*_SZ_a_1); hipMemcpy(_DEV_a, a, sizeof(int)*_SZ_a_1, hipMemcpyHostToDevice); int _NUM_THREADS = N,_NUM_BLOCKS=1; int _NUM_TILE=1; dim3 _THREADS(512); dim3 _BLOCKS(1); if(_NUM_THREADS < _NTHREAD) { _THREADS.x=_NUM_THREADS; } else { _THREADS.x=_NTHREAD; _NUM_BLOCKS=(_NUM_THREADS % _NTHREAD == 0)?(_NUM_THREADS/_NTHREAD):((_NUM_THREADS/_NTHREAD)+1); if(_NUM_BLOCKS<_NBLOCK) _BLOCKS.x=_NUM_BLOCKS; else { _BLOCKS.x=_NBLOCK; int temp=_NUM_BLOCKS; _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1); } } int _CUDA_TILE; clock_gettime(CLOCK_MONOTONIC, &mid_start); for(i=0;i<18;i+=2) for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++) hipLaunchKernelGGL(( _AFFINE_KERNEL), dim3(_BLOCKS),dim3(_THREADS), 0, 0, _DEV_a, _SZ_a_1, 1, i, 0, 18, _CUDA_TILE); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &mid_end); hipMemcpy(a, _DEV_a, sizeof(int)*_SZ_a_1, hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &end); pre_time = (double) ((((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000; post_time = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec)) / 1000000000; computeTime = (double) ((((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec) - (((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec)) / 1000000000; runTime = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000; printf("********************************\n"); fprintf(fp,"%d,%d,%d,%d,%d,%.14f,%.14f,%.14f,%.14f,%d\n",N,_NTHREAD*_NBLOCK,_THREADS.x,_BLOCKS.x,data,pre_time,computeTime,post_time,runTime,_CUDA_TILE); //fprintf(fp,"%d,%d,%.14f\n",N_EDGES,data,runTime); //fclose(fp); printf("RUN TIME: %.14f\n", runTime); fclose(fp); fclose(f); return 0; } __global__ void _AFFINE_KERNEL(int* a,int _SZ_a_1,int phi_count, int CUDA_i, int CUDA_L_i,int CUDA_U_i, int _CUDA_TILE) { int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x; 
if((CUDA_i<=i)&&(i<(CUDA_i+2))&&(i<CUDA_U_i)){ a[18-i+1]=a[18-i-1]; }}
88270c72f16e373f0ebb6bc64725c5c75745d1f6.cu
//#define _NTHREAD 512 //#define _NBLOCK 65535 #include<cuda.h> __global__ void _AFFINE_KERNEL(int* ,int ,int ,int ,int ,int ,int ); #include<stdio.h> #include<stdlib.h> #include<string.h> #include<time.h> int main(int argc, char** argv) { int N = 1000; int N_NODES = 100; int data = 1; int _NTHREAD = 1, _NBLOCK = 1; char* readfile, *outfile; if(argc>1) _NTHREAD = atoi(argv[1]); if(argc>2) _NBLOCK = atoi(argv[2]); if(argc>3) data = atoi(argv[3]) + 1; if(argc>4) readfile = argv[4]; int i,j; //srand(time(NULL)); FILE* f; f = fopen(readfile, "r"); j=0; char c; while(1){ c = fgetc(f); if(c=='\n') { j++; c = fgetc(f); if(c!='%') break; } } fscanf(f, "%d", &N_NODES); //printf("---------%d ----------",N_NODES); fscanf(f, "%d", &N_NODES); fscanf(f, "%d", &N); if(2*N<_NTHREAD*_NBLOCK) { printf("%d\n",_NTHREAD*_NBLOCK); fclose(f); return 0; } struct timespec start, end, mid_start, mid_end; double runTime, pre_time, post_time, computeTime; outfile = (char*)malloc(sizeof(char)*(strlen(readfile)+10)); strcpy(outfile, readfile); //strcat(outfile, "in2_"); //strcat(outfile, readfile); strcat(outfile, ".data"); FILE* fp; fp = fopen(outfile, "a"); int a[N],x; for (i = 0; i < N; i++) { fscanf(f, "%d", &a[i]); fscanf(f, "%d", &x); } clock_gettime(CLOCK_MONOTONIC, &start); int _SZ_a_1 = N; int *_DEV_a; cudaMalloc((void**) &_DEV_a, sizeof(int)*_SZ_a_1); cudaMemcpy(_DEV_a, a, sizeof(int)*_SZ_a_1, cudaMemcpyHostToDevice); int _NUM_THREADS = N,_NUM_BLOCKS=1; int _NUM_TILE=1; dim3 _THREADS(512); dim3 _BLOCKS(1); if(_NUM_THREADS < _NTHREAD) { _THREADS.x=_NUM_THREADS; } else { _THREADS.x=_NTHREAD; _NUM_BLOCKS=(_NUM_THREADS % _NTHREAD == 0)?(_NUM_THREADS/_NTHREAD):((_NUM_THREADS/_NTHREAD)+1); if(_NUM_BLOCKS<_NBLOCK) _BLOCKS.x=_NUM_BLOCKS; else { _BLOCKS.x=_NBLOCK; int temp=_NUM_BLOCKS; _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1); } } int _CUDA_TILE; clock_gettime(CLOCK_MONOTONIC, &mid_start); for(i=0;i<18;i+=2) for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++) _AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_a, _SZ_a_1, 1, i, 0, 18, _CUDA_TILE); cudaDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &mid_end); cudaMemcpy(a, _DEV_a, sizeof(int)*_SZ_a_1, cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &end); pre_time = (double) ((((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000; post_time = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec)) / 1000000000; computeTime = (double) ((((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec) - (((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec)) / 1000000000; runTime = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000; printf("********************************\n"); fprintf(fp,"%d,%d,%d,%d,%d,%.14f,%.14f,%.14f,%.14f,%d\n",N,_NTHREAD*_NBLOCK,_THREADS.x,_BLOCKS.x,data,pre_time,computeTime,post_time,runTime,_CUDA_TILE); //fprintf(fp,"%d,%d,%.14f\n",N_EDGES,data,runTime); //fclose(fp); printf("RUN TIME: %.14f\n", runTime); fclose(fp); fclose(f); return 0; } __global__ void _AFFINE_KERNEL(int* a,int _SZ_a_1,int phi_count, int CUDA_i, int CUDA_L_i,int CUDA_U_i, int _CUDA_TILE) { int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x; if((CUDA_i<=i)&&(i<(CUDA_i+2))&&(i<CUDA_U_i)){ a[18-i+1]=a[18-i-1]; }}
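/* The launch setup above folds an arbitrary element count into the hardware limits in three levels: threads per block, blocks per launch, and a sequential tile loop, with the kernel rebuilding the flat index as gridDim.x*blockDim.x*TILE + blockDim.x*blockIdx.x + threadIdx.x. A compact sketch of the same decomposition (plan_tiles and ceilDiv are hypothetical helper names): */
static inline int ceilDiv(int a, int b) { return (a + b - 1) / b; }

static void plan_tiles(int num_threads, int max_threads, int max_blocks,
                       int *threads, int *blocks, int *tiles) {
  *threads = num_threads < max_threads ? num_threads : max_threads;
  *blocks  = ceilDiv(num_threads, *threads);
  *tiles   = 1;
  if (*blocks > max_blocks) {          // too many blocks: serialize over tiles
    *tiles  = ceilDiv(*blocks, max_blocks);
    *blocks = max_blocks;
  }
}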
be8abf64c5d450e802fc5117da09a9667d6c29c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../knn.h" #include "hipcub/hipcub.hpp" #include <limits> namespace knn { #include "kernel_hip.cuh" // device data template <typename T> struct Device { T *pts; T *dist2s; //distance squared sums float *dists; int* indexes; hipcub::KeyValuePair<int, T> *indexDist; //hipcub::ArgMin output void *tmp; //hipcub::ArgMin storage }; // host data struct Host { int k; //number of nearest neighbors int ndim; //number of dimensions int npt; //number of points size_t cubTmpSize; //nbytes of storage required by cub::min }; template <typename T> void allocate_device_storage( Host *h, Device<T> *d ) { size_t nbytes = sizeof(T) * h->npt * h->ndim; hipError_t err = hipMalloc( &d->pts, nbytes ); nbytes = sizeof(T) * h->npt; err = hipMalloc( &d->dist2s, nbytes ); nbytes = sizeof(float) * h->npt * h->k; err = hipMalloc( &d->dists, nbytes ); nbytes = sizeof(int) * h->npt * h->k; err = hipMalloc( &d->indexes, nbytes); nbytes = sizeof(hipcub::KeyValuePair<int, T>); err = hipMalloc( &d->indexDist, nbytes ); d->tmp = NULL; nbytes = 0; hipcub::DeviceReduce::ArgMin( d->tmp, nbytes, d->dist2s, d->indexDist, h->npt ); h->cubTmpSize = nbytes; //record the required temp-storage size; find_ith_neighbor passes it to the real ArgMin calls err = hipMalloc( &d->tmp, nbytes ); return; } template <typename T> void free_device_storage( Device<T> *d ) { if (d->pts != NULL) hipFree( d->pts ); if (d->dist2s != NULL) hipFree( d->dist2s ); if (d->dists != NULL) hipFree( d->dists ); if (d->indexes != NULL) hipFree( d->indexes ); if (d->tmp != NULL) hipFree( d->tmp ); if (d->indexDist != NULL) hipFree( d->indexDist ); return; } template <typename T> void calc_dist2s( const Host *h, const int ctrPtIdx, const int idim, Device<T> *d ) { dim3 bsize (BLOCK_DIMX,1,1); dim3 gsize (h->npt/bsize.x,1,1); if (h->npt % bsize.x) gsize.x += 1; //round up by one block, not by the remainder if (idim == 0) { hipLaunchKernelGGL(( calc_dist2s_dim0_kernel<T>), dim3(gsize),dim3(bsize), 0, 0, ctrPtIdx, d->pts, h->npt, d->dist2s ); } else { hipLaunchKernelGGL(( calc_dist2s_dimi_kernel<T>), dim3(gsize),dim3(bsize), 0, 0, ctrPtIdx, d->pts + idim*h->npt, h->npt, d->dist2s ); } return; } template <typename T> void find_ith_neighbor( Host *h, const int ctrPtIdx, const int inn, Device<T> *d ) { const T maxValue = std::numeric_limits<T>::max(); //don't want self as nearest nbor. Set distance to self to maxValue if (inn == 0) { hipError_t err = hipMemcpy( d->dist2s+ctrPtIdx, &maxValue, sizeof(T), hipMemcpyHostToDevice ); } hipcub::DeviceReduce::ArgMin( d->tmp, h->cubTmpSize, d->dist2s, d->indexDist, h->npt ); //set distance of the ith n.n. 
to maxValue so next one can be found //copy to device output arrays hipLaunchKernelGGL(( copy_init_kernel<T>), dim3(1),dim3(1), 0, 0, maxValue, d->dist2s, d->indexDist, d->indexes + ctrPtIdx*h->k + inn, d->dists + ctrPtIdx*h->k + inn ); return; } template <typename T> void knn( const int k, const int ndim, const int npt, const T *const pts_in, int *const indexes_out, float *const dists_out ) { //set host parameters knn::Host hostParams; hostParams.k = k; hostParams.ndim = ndim; hostParams.npt = npt; hostParams.cubTmpSize = 0; //allocate device storage knn::Device<T> deviceParams; knn::allocate_device_storage<T>( &hostParams, &deviceParams); //copy input data to device memory size_t nbytes = sizeof(T) * ndim * npt; hipError_t err = hipMemcpy( deviceParams.pts, pts_in, nbytes, hipMemcpyHostToDevice ); //brutishly calculate nearest neighbors for (int ctrPtIdx = 0; ctrPtIdx < npt; ++ctrPtIdx) { for (int idim = 0; idim < ndim; ++idim) { knn::calc_dist2s<T>( &hostParams, ctrPtIdx, idim, &deviceParams ); } for (int inn = 0; inn < k; ++inn) { knn::find_ith_neighbor<T>( &hostParams, ctrPtIdx, inn, &deviceParams ); } } //copy result data to host memory nbytes = sizeof(int) * npt * k; err = hipMemcpy( indexes_out, deviceParams.indexes, nbytes, hipMemcpyDeviceToHost ); nbytes = sizeof(float) * npt * k; err = hipMemcpy( dists_out, deviceParams.dists, nbytes, hipMemcpyDeviceToHost ); //free device storage knn::free_device_storage<T>( &deviceParams ); return; } } /*namespace knn*/ void knn_float( const int k, const int ndim, const int npt, const float *const pts_in, int *const indexes_out, float *const dists_out ) { knn::knn<float>( k, ndim, npt, pts_in, indexes_out, dists_out ); return; } void knn_double( const int k, const int ndim, const int npt, const double *const pts_in, int *const indexes_out, float *const dists_out ) { knn::knn<double>( k, ndim, npt, pts_in, indexes_out, dists_out ); return; }
be8abf64c5d450e802fc5117da09a9667d6c29c9.cu
#include "../knn.h" #include "cub/cub.cuh" #include <limits> namespace knn { #include "kernel.cuh" // device data template <typename T> struct Device { T *pts; T *dist2s; //distance squared sums float *dists; int* indexes; cub::KeyValuePair<int, T> *indexDist; //cub::ArgMin output void *tmp; //cub::ArgMin storage }; // host data struct Host { int k; //number of nearest neighbors int ndim; //number of dimensions int npt; //number of points size_t cubTmpSize; //nbytes of storage required by cub::min }; template <typename T> void allocate_device_storage( Host *h, Device<T> *d ) { size_t nbytes = sizeof(T) * h->npt * h->ndim; cudaError_t err = cudaMalloc( &d->pts, nbytes ); nbytes = sizeof(T) * h->npt; err = cudaMalloc( &d->dist2s, nbytes ); nbytes = sizeof(float) * h->npt * h->k; err = cudaMalloc( &d->dists, nbytes ); nbytes = sizeof(int) * h->npt * h->k; err = cudaMalloc( &d->indexes, nbytes); nbytes = sizeof(cub::KeyValuePair<int, T>); err = cudaMalloc( &d->indexDist, nbytes ); d->tmp = NULL; nbytes = 0; cub::DeviceReduce::ArgMin( d->tmp, nbytes, d->dist2s, d->indexDist, h->npt ); err = cudaMalloc( &d->tmp, nbytes ); return; } template <typename T> void free_device_storage( Device<T> *d ) { if (d->pts != NULL) cudaFree( d->pts ); if (d->dist2s != NULL) cudaFree( d->dist2s ); if (d->dists != NULL) cudaFree( d->dists ); if (d->indexes != NULL) cudaFree( d->indexes ); if (d->tmp != NULL) cudaFree( d->tmp ); if (d->indexDist != NULL) cudaFree( d->indexDist ); return; } template <typename T> void calc_dist2s( const Host *h, const int ctrPtIdx, const int idim, Device<T> *d ) { dim3 bsize (BLOCK_DIMX,1,1); dim3 gsize (h->npt/bsize.x,1,1); gsize.x += h->npt % bsize.x; if (idim == 0) { calc_dist2s_dim0_kernel<T><<<gsize,bsize>>>( ctrPtIdx, d->pts, h->npt, d->dist2s ); } else { calc_dist2s_dimi_kernel<T><<<gsize,bsize>>>( ctrPtIdx, d->pts + idim*h->npt, h->npt, d->dist2s ); } return; } template <typename T> void find_ith_neighbor( Host *h, const int ctrPtIdx, const int inn, Device<T> *d ) { const T maxValue = std::numeric_limits<T>::max(); //don't want self as nearest nbor. Set distance to self to maxValue if (inn == 0) { cudaError_t err = cudaMemcpy( d->dist2s+ctrPtIdx, &maxValue, sizeof(T), cudaMemcpyHostToDevice ); } cub::DeviceReduce::ArgMin( d->tmp, h->cubTmpSize, d->dist2s, d->indexDist, h->npt ); //set distance of the ith n.n. 
to maxValue so next one can be found //copy to device output arrays copy_init_kernel<T><<<1,1>>>( maxValue, d->dist2s, d->indexDist, d->indexes + ctrPtIdx*h->k + inn, d->dists + ctrPtIdx*h->k + inn ); return; } template <typename T> void knn( const int k, const int ndim, const int npt, const T *const pts_in, int *const indexes_out, float *const dists_out ) { //set host parameters knn::Host hostParams; hostParams.k = k; hostParams.ndim = ndim; hostParams.npt = npt; hostParams.cubTmpSize = 0; //allocate device storage knn::Device<T> deviceParams; knn::allocate_device_storage<T>( &hostParams, &deviceParams); //copy input data to device memory size_t nbytes = sizeof(T) * ndim * npt; cudaError_t err = cudaMemcpy( deviceParams.pts, pts_in, nbytes, cudaMemcpyHostToDevice ); //brutishly calculate nearest neighbors for (int ctrPtIdx = 0; ctrPtIdx < npt; ++ctrPtIdx) { for (int idim = 0; idim < ndim; ++idim) { knn::calc_dist2s<T>( &hostParams, ctrPtIdx, idim, &deviceParams ); } for (int inn = 0; inn < k; ++inn) { knn::find_ith_neighbor<T>( &hostParams, ctrPtIdx, inn, &deviceParams ); } } //copy result data to host memory nbytes = sizeof(int) * npt * k; err = cudaMemcpy( indexes_out, deviceParams.indexes, nbytes, cudaMemcpyDeviceToHost ); nbytes = sizeof(float) * npt * k; err = cudaMemcpy( dists_out, deviceParams.dists, nbytes, cudaMemcpyDeviceToHost ); //free device storage knn::free_device_storage<T>( &deviceParams ); return; } } /*namespace knn*/ void knn_float( const int k, const int ndim, const int npt, const float *const pts_in, int *const indexes_out, float *const dists_out ) { knn::knn<float>( k, ndim, npt, pts_in, indexes_out, dists_out ); return; } void knn_double( const int k, const int ndim, const int npt, const double *const pts_in, int *const indexes_out, float *const dists_out ) { knn::knn<double>( k, ndim, npt, pts_in, indexes_out, dists_out ); return; }
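/* A minimal host-side usage sketch for the entry points above: the two nearest neighbours of each of four 2D points. Coordinates are passed in structure-of-arrays order (all x's, then all y's), as knn<T> expects, and the returned dists are squared distances per the dist2s naming. The prototype is presumably declared in ../knn.h; it is restated here so the sketch is self-contained. */
#include <cstdio>

void knn_float(const int k, const int ndim, const int npt,
               const float *const pts_in, int *const indexes_out,
               float *const dists_out);

int knn_demo() {
  const int k = 2, ndim = 2, npt = 4;
  float pts[ndim * npt] = {0.f, 1.f, 0.f, 1.f,    // x: corners of a unit square
                           0.f, 0.f, 1.f, 1.f};   // y
  int idx[npt * k];
  float d2[npt * k];
  knn_float(k, ndim, npt, pts, idx, d2);
  for (int i = 0; i < npt; i++)
    std::printf("point %d: nn %d (d2 %g), next %d (d2 %g)\n",
                i, idx[i * k], d2[i * k], idx[i * k + 1], d2[i * k + 1]);
  return 0;
}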
894a23d508e3c88ef739ee6f9a8e9ee2e50c1ff1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define MINUS_LOG_THRESHOLD -18.42 #define SOFTMAX_THREADS 128 __global__ void cunn_SoftMax_updateOutput_kernel(float *output, float *input, int nframe, int dim) { __shared__ float buffer[SOFTMAX_THREADS+1]; int k = blockIdx.x; float *input_k = input + k*dim; float *output_k = output + k*dim; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // max? buffer[threadIdx.x] = -FLT_MAX; for (int i=i_start; i<i_end; i+=i_step) { float z = input_k[i]; if(buffer[threadIdx.x] < z) buffer[threadIdx.x] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float max_k = -FLT_MAX; for (int i=0; i<blockDim.x; i++) { if(max_k < buffer[i]) max_k = buffer[i]; } buffer[SOFTMAX_THREADS] = max_k; } __syncthreads(); // sum? float max_k = buffer[SOFTMAX_THREADS]; buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = __expf(input_k[i]-max_k); buffer[threadIdx.x] += z; output_k[i] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[SOFTMAX_THREADS] = sum_k; } __syncthreads(); // softmax float sum_k = buffer[SOFTMAX_THREADS]; for (int i=i_start; i<i_end; i+=i_step) output_k[i] = output_k[i] / sum_k; } __global__ void cunn_SoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int nframe, int dim) { __shared__ float buffer[SOFTMAX_THREADS]; int k = blockIdx.x; float *gradInput_k = gradInput + k*dim; float *output_k = output + k*dim; float *gradOutput_k = gradOutput + k*dim; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // sum? buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) buffer[threadIdx.x] += gradOutput_k[i] * output_k[i]; __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[0] = sum_k; } __syncthreads(); float sum_k = buffer[0]; for (int i=i_start; i<i_end; i+=i_step) gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k); } static int cunn_SoftMax_updateOutput(lua_State *L) { THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); input = THCudaTensor_newContiguous(input); THCudaTensor_resizeAs(output, input); if(input->nDimension == 1) { dim3 blocks(1); dim3 threads(SOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel), dim3(blocks),dim3(threads), 0, 0, THCudaTensor_data(output), THCudaTensor_data(input), 1, input->size[0]); } else if(input->nDimension == 2) { dim3 blocks(input->size[0]); dim3 threads(SOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel), dim3(blocks),dim3(threads), 0, 0, THCudaTensor_data(output), THCudaTensor_data(input), input->size[0], input->size[1]); } else THError("vector or matrix expected"); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); THCudaTensor_free(input); return 1; } struct softmaxupdateGradInput_functor { float value; softmaxupdateGradInput_functor(float value_) : value(value_) {} __host__ __device__ float operator()(const float& output, const float& gradOutput) const { return gradOutput - exp(output)*value; } }; static int cunn_SoftMax_updateGradInput(lua_State *L) { THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output = 
(THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); output = THCudaTensor_newContiguous(output); gradOutput = THCudaTensor_newContiguous(gradOutput); THCudaTensor_resizeAs(gradInput, output); if(gradInput->nDimension == 1) { dim3 blocks(1); dim3 threads(SOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks),dim3(threads), 0, 0, THCudaTensor_data(gradInput), THCudaTensor_data(output), THCudaTensor_data(gradOutput), 1, gradInput->size[0]); } else if(gradInput->nDimension == 2) { dim3 blocks(gradInput->size[0]); dim3 threads(SOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks),dim3(threads), 0, 0, THCudaTensor_data(gradInput), THCudaTensor_data(output), THCudaTensor_data(gradOutput), gradInput->size[0], gradInput->size[1]); } else THError("vector or matrix expected"); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); THCudaTensor_free(gradOutput); THCudaTensor_free(output); return 1; } static const struct luaL_Reg cunn_SoftMax__ [] = { {"SoftMax_updateOutput", cunn_SoftMax_updateOutput}, {"SoftMax_updateGradInput", cunn_SoftMax_updateGradInput}, {NULL, NULL} }; static void cunn_SoftMax_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SoftMax__, "nn"); lua_pop(L,1); }
894a23d508e3c88ef739ee6f9a8e9ee2e50c1ff1.cu
#define MINUS_LOG_THRESHOLD -18.42 #define SOFTMAX_THREADS 128 __global__ void cunn_SoftMax_updateOutput_kernel(float *output, float *input, int nframe, int dim) { __shared__ float buffer[SOFTMAX_THREADS+1]; int k = blockIdx.x; float *input_k = input + k*dim; float *output_k = output + k*dim; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // max? buffer[threadIdx.x] = -FLT_MAX; for (int i=i_start; i<i_end; i+=i_step) { float z = input_k[i]; if(buffer[threadIdx.x] < z) buffer[threadIdx.x] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float max_k = -FLT_MAX; for (int i=0; i<blockDim.x; i++) { if(max_k < buffer[i]) max_k = buffer[i]; } buffer[SOFTMAX_THREADS] = max_k; } __syncthreads(); // sum? float max_k = buffer[SOFTMAX_THREADS]; buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = __expf(input_k[i]-max_k); buffer[threadIdx.x] += z; output_k[i] = z; } __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[SOFTMAX_THREADS] = sum_k; } __syncthreads(); // softmax float sum_k = buffer[SOFTMAX_THREADS]; for (int i=i_start; i<i_end; i+=i_step) output_k[i] = output_k[i] / sum_k; } __global__ void cunn_SoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int nframe, int dim) { __shared__ float buffer[SOFTMAX_THREADS]; int k = blockIdx.x; float *gradInput_k = gradInput + k*dim; float *output_k = output + k*dim; float *gradOutput_k = gradOutput + k*dim; int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; // sum? buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) buffer[threadIdx.x] += gradOutput_k[i] * output_k[i]; __syncthreads(); // reduce if (threadIdx.x == 0) { float sum_k = 0; for (int i=0; i<blockDim.x; i++) sum_k += buffer[i]; buffer[0] = sum_k; } __syncthreads(); float sum_k = buffer[0]; for (int i=i_start; i<i_end; i+=i_step) gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k); } static int cunn_SoftMax_updateOutput(lua_State *L) { THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); input = THCudaTensor_newContiguous(input); THCudaTensor_resizeAs(output, input); if(input->nDimension == 1) { dim3 blocks(1); dim3 threads(SOFTMAX_THREADS); cunn_SoftMax_updateOutput_kernel<<<blocks,threads>>>(THCudaTensor_data(output), THCudaTensor_data(input), 1, input->size[0]); } else if(input->nDimension == 2) { dim3 blocks(input->size[0]); dim3 threads(SOFTMAX_THREADS); cunn_SoftMax_updateOutput_kernel<<<blocks,threads>>>(THCudaTensor_data(output), THCudaTensor_data(input), input->size[0], input->size[1]); } else THError("vector or matrix expected"); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); THCudaTensor_free(input); return 1; } struct softmaxupdateGradInput_functor { float value; softmaxupdateGradInput_functor(float value_) : value(value_) {} __host__ __device__ float operator()(const float& output, const float& gradOutput) const { return gradOutput - exp(output)*value; } }; static int cunn_SoftMax_updateGradInput(lua_State *L) { THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); output = 
THCudaTensor_newContiguous(output); gradOutput = THCudaTensor_newContiguous(gradOutput); THCudaTensor_resizeAs(gradInput, output); if(gradInput->nDimension == 1) { dim3 blocks(1); dim3 threads(SOFTMAX_THREADS); cunn_SoftMax_updateGradInput_kernel<<<blocks,threads>>>(THCudaTensor_data(gradInput), THCudaTensor_data(output), THCudaTensor_data(gradOutput), 1, gradInput->size[0]); } else if(gradInput->nDimension == 2) { dim3 blocks(gradInput->size[0]); dim3 threads(SOFTMAX_THREADS); cunn_SoftMax_updateGradInput_kernel<<<blocks,threads>>>(THCudaTensor_data(gradInput), THCudaTensor_data(output), THCudaTensor_data(gradOutput), gradInput->size[0], gradInput->size[1]); } else THError("vector or matrix expected"); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); THCudaTensor_free(gradOutput); THCudaTensor_free(output); return 1; } static const struct luaL_Reg cunn_SoftMax__ [] = { {"SoftMax_updateOutput", cunn_SoftMax_updateOutput}, {"SoftMax_updateGradInput", cunn_SoftMax_updateGradInput}, {NULL, NULL} }; static void cunn_SoftMax_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SoftMax__, "nn"); lua_pop(L,1); }
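/* The forward kernel above is the standard numerically stable softmax: subtract the row maximum before exponentiating, then normalize by the sum. The same three phases for one row on the host, as a readability aid (softmax_row is a hypothetical name, not part of the cunn API): */
#include <cfloat>
#include <cmath>

static void softmax_row(const float *in, float *out, int dim) {
  float mx = -FLT_MAX;
  for (int i = 0; i < dim; i++) mx = fmaxf(mx, in[i]);  // phase 1: row max
  float sum = 0.f;
  for (int i = 0; i < dim; i++) {                       // phase 2: exp and sum
    out[i] = expf(in[i] - mx);
    sum += out[i];
  }
  for (int i = 0; i < dim; i++) out[i] /= sum;          // phase 3: normalize
}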
4305f8e9edc63f8b207d84a3660d7c4f55d76bcf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_top; int xdim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_top; int ydim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_top; int xdim1_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_top; int ydim1_update_halo_kernel2_zvel_plus_4_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim0_update_halo_kernel2_zvel_plus_4_top * \ ydim0_update_halo_kernel2_zvel_plus_4_top * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim1_update_halo_kernel2_zvel_plus_4_top * \ ydim1_update_halo_kernel2_zvel_plus_4_top * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_4_top_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, -4, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, -4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_top( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top * ydim0_update_halo_kernel2_zvel_plus_4_top; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top * ydim1_update_halo_kernel2_zvel_plus_4_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_top_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 95)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(95, "update_halo_kernel2_zvel_plus_4_top"); OPS_kernels[95].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - 
start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_top_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_top, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_4_top_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_top, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_4_top_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_top, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_4_top_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_top, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_4_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[95].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_top), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[95].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[95].mpi_time += t2 - t1; OPS_kernels[95].transfer 
+= ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
4305f8e9edc63f8b207d84a3660d7c4f55d76bcf.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_top; int xdim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_top; int ydim0_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_top; int xdim1_update_halo_kernel2_zvel_plus_4_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_top; int ydim1_update_halo_kernel2_zvel_plus_4_top_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim0_update_halo_kernel2_zvel_plus_4_top * \ ydim0_update_halo_kernel2_zvel_plus_4_top * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_4_top * (y) + \ xdim1_update_halo_kernel2_zvel_plus_4_top * \ ydim1_update_halo_kernel2_zvel_plus_4_top * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_4_top_gpu(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, -4, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, -4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_top( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_top * ydim0_update_halo_kernel2_zvel_plus_4_top; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_top * ydim1_update_halo_kernel2_zvel_plus_4_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_top_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_top(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 95)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(95, "update_halo_kernel2_zvel_plus_4_top"); OPS_kernels[95].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = 
args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_top_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_top, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_4_top_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_top, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_4_top_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_top, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_4_top_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_top, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_4_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[95].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_4_top<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[95].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[95].mpi_time += t2 - t1; OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, 
&arg1); } }
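/* Stripped of the OPS plumbing, the kernel above copies, for every point in the requested range, the value from four rows below: with the OPS_ACC macros this is zvel(x, y, z) = zvel(x, y-4, z). A plain-CUDA sketch of that copy on a flat array with row stride xdim and plane stride xdim*ydim (halo_plus_4_top_sketch is a hypothetical name): */
__global__ void halo_plus_4_top_sketch(double *zvel, int xdim, int ydim,
                                       int x0, int y0, int z0,
                                       int nx, int ny, int nz) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x < nx && y < ny && z < nz) {
    size_t dst = ((size_t)(z0 + z) * ydim + (y0 + y)) * xdim + (x0 + x);
    size_t src = dst - 4 * (size_t)xdim;  // same x and z, four rows down in y
    zvel[dst] = zvel[src];
  }
}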
e097f9a65ce8418775aaaaefeda1e7ea54d07922.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <thrust/random.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <hip/hip_cooperative_groups.h> namespace cg = cooperative_groups; #include <helper_cuda.h> //////////////////////////////////////////////////////////////////////////////// // A structure of 2D points (structure of arrays). //////////////////////////////////////////////////////////////////////////////// class Points { float *m_x; float *m_y; public: // Constructor. __host__ __device__ Points() : m_x(NULL), m_y(NULL) {} // Constructor. __host__ __device__ Points(float *x, float *y) : m_x(x), m_y(y) {} // Get a point. __host__ __device__ __forceinline__ float2 get_point(int idx) const { return make_float2(m_x[idx], m_y[idx]); } // Set a point. __host__ __device__ __forceinline__ void set_point(int idx, const float2 &p) { m_x[idx] = p.x; m_y[idx] = p.y; } // Set the pointers. __host__ __device__ __forceinline__ void set(float *x, float *y) { m_x = x; m_y = y; } }; //////////////////////////////////////////////////////////////////////////////// // A 2D bounding box //////////////////////////////////////////////////////////////////////////////// class Bounding_box { // Extreme points of the bounding box. float2 m_p_min; float2 m_p_max; public: // Constructor. Create a unit box. __host__ __device__ Bounding_box() { m_p_min = make_float2(0.0f, 0.0f); m_p_max = make_float2(1.0f, 1.0f); } // Compute the center of the bounding-box. __host__ __device__ void compute_center(float2 &center) const { center.x = 0.5f * (m_p_min.x + m_p_max.x); center.y = 0.5f * (m_p_min.y + m_p_max.y); } // The points of the box. __host__ __device__ __forceinline__ const float2 &get_max() const { return m_p_max; } __host__ __device__ __forceinline__ const float2 &get_min() const { return m_p_min; } // Does a box contain a point. __host__ __device__ bool contains(const float2 &p) const { return p.x >= m_p_min.x && p.x < m_p_max.x && p.y >= m_p_min.y && p.y < m_p_max.y; } // Define the bounding box. __host__ __device__ void set(float min_x, float min_y, float max_x, float max_y) { m_p_min.x = min_x; m_p_min.y = min_y; m_p_max.x = max_x; m_p_max.y = max_y; } }; //////////////////////////////////////////////////////////////////////////////// // A node of a quadtree. //////////////////////////////////////////////////////////////////////////////// class Quadtree_node { // The identifier of the node. int m_id; // The bounding box of the tree. Bounding_box m_bounding_box; // The range of points. int m_begin, m_end; public: // Constructor. __host__ __device__ Quadtree_node() : m_id(0), m_begin(0), m_end(0) {} // The ID of a node at its level. __host__ __device__ int id() const { return m_id; } // The ID of a node at its level. __host__ __device__ void set_id(int new_id) { m_id = new_id; } // The bounding box. __host__ __device__ __forceinline__ const Bounding_box &bounding_box() const { return m_bounding_box; } // Set the bounding box. 
__host__ __device__ __forceinline__ void set_bounding_box(float min_x, float min_y, float max_x, float max_y) { m_bounding_box.set(min_x, min_y, max_x, max_y); } // The number of points in the tree. __host__ __device__ __forceinline__ int num_points() const { return m_end - m_begin; } // The range of points in the tree. __host__ __device__ __forceinline__ int points_begin() const { return m_begin; } __host__ __device__ __forceinline__ int points_end() const { return m_end; } // Define the range for that node. __host__ __device__ __forceinline__ void set_range(int begin, int end) { m_begin = begin; m_end = end; } }; //////////////////////////////////////////////////////////////////////////////// // Algorithm parameters. //////////////////////////////////////////////////////////////////////////////// struct Parameters { // Choose the right set of points to use as in/out. int point_selector; // The number of nodes at a given level (2^k for level k). int num_nodes_at_this_level; // The recursion depth. int depth; // The max value for depth. const int max_depth; // The minimum number of points in a node to stop recursion. const int min_points_per_node; // Constructor set to default values. __host__ __device__ Parameters(int max_depth, int min_points_per_node) : point_selector(0), num_nodes_at_this_level(1), depth(0), max_depth(max_depth), min_points_per_node(min_points_per_node) {} // Copy constructor. Changes the values for next iteration. __host__ __device__ Parameters(const Parameters &params, bool) : point_selector((params.point_selector+1) % 2), num_nodes_at_this_level(4*params.num_nodes_at_this_level), depth(params.depth+1), max_depth(params.max_depth), min_points_per_node(params.min_points_per_node) {} }; //////////////////////////////////////////////////////////////////////////////// // Build a quadtree on the GPU. Use CUDA Dynamic Parallelism. // // The algorithm works as follows. The host (CPU) launches one block of // NUM_THREADS_PER_BLOCK threads. That block will do the following steps: // // 1- Check the number of points and its depth. // // We impose a maximum depth to the tree and a minimum number of points per // node. If the maximum depth is exceeded or the minimum number of points is // reached, the threads in the block exit. // // Before exiting, they perform a buffer swap if it is needed. Indeed, the // algorithm uses two buffers to permute the points and make sure they are // properly distributed in the quadtree. By design we want all points to be // in the first buffer of points at the end of the algorithm. It is the reason // why we may have to swap the buffer before leaving (if the points are in the // 2nd buffer). // // 2- Count the number of points in each child. // // If the depth is not too high and the number of points is sufficient, the // block has to dispatch the points into four geometrical buckets: Its // children. For that purpose, we compute the center of the bounding box and // count the number of points in each quadrant. // // The set of points is divided into sections. Each section is given to a // warp of threads (32 threads). Warps use __ballot and __popc intrinsics // to count the points. See the Programming Guide for more information about // those functions. // // 3- Scan the warps' results to know the "global" numbers. // // Warps work independently from each other. At the end, each warp knows the // number of points in its section. To know the numbers for the block, the // block has to run a scan/reduce at the block level. 
It's a traditional // approach. The implementation in that sample is not as optimized as what // could be found in fast radix sorts, for example, but it relies on the same // idea. // // 4- Move points. // // Now that the block knows how many points go in each of its 4 children, it // remains to dispatch the points. It is straightforward. // // 5- Launch new blocks. // // The block launches four new blocks: one per child. Each of the four blocks // will apply the same algorithm. //////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_BLOCK > __global__ void build_quadtree_kernel(Quadtree_node *nodes, Points *points, Parameters params) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // The number of warps in a block. const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warpSize; // Shared memory to store the number of points. extern __shared__ int smem[]; // s_num_pts[4][NUM_WARPS_PER_BLOCK]; // Addresses of shared memory. volatile int *s_num_pts[4]; for (int i = 0 ; i < 4 ; ++i) s_num_pts[i] = (volatile int *) &smem[i*NUM_WARPS_PER_BLOCK]; // Compute the coordinates of the threads in the block. const int warp_id = threadIdx.x / warpSize; const int lane_id = threadIdx.x % warpSize; // Mask for compaction. int lane_mask_lt = (1 << lane_id) - 1; // Same as: asm( "mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt) ); // The current node. Quadtree_node &node = nodes[blockIdx.x]; // The number of points in the node. int num_points = node.num_points(); float2 center; int range_begin, range_end; int warp_cnts[4] = {0, 0, 0, 0}; // // 1- Check the number of points and its depth. // // Stop the recursion here. Make sure points[0] contains all the points. if (params.depth >= params.max_depth || num_points <= params.min_points_per_node) { if (params.point_selector == 1) { int it = node.points_begin(), end = node.points_end(); for (it += threadIdx.x ; it < end ; it += NUM_THREADS_PER_BLOCK) if (it < end) points[0].set_point(it, points[1].get_point(it)); } return; } // Compute the center of the bounding box of the points. const Bounding_box &bbox = node.bounding_box(); bbox.compute_center(center); // Find how many points to give to each warp. int num_points_per_warp = max(warpSize, (num_points + NUM_WARPS_PER_BLOCK-1) / NUM_WARPS_PER_BLOCK); // Each warp of threads will compute the number of points to move to each quadrant. range_begin = node.points_begin() + warp_id * num_points_per_warp; range_end = min(range_begin + num_points_per_warp, node.points_end()); // // 2- Count the number of points in each child. // // Input points. const Points &in_points = points[params.point_selector]; cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); // Compute the number of points. for (int range_it = range_begin + tile32.thread_rank() ; tile32.any(range_it < range_end) ; range_it += warpSize) { // Is it still an active thread? bool is_active = range_it < range_end; // Load the coordinates of the point. float2 p = is_active ? in_points.get_point(range_it) : make_float2(0.0f, 0.0f); // Count top-left points. int num_pts = __popc(tile32.ballot(is_active && p.x < center.x && p.y >= center.y)); warp_cnts[0] += tile32.shfl(num_pts, 0); // Count top-right points. num_pts = __popc(tile32.ballot(is_active && p.x >= center.x && p.y >= center.y)); warp_cnts[1] += tile32.shfl(num_pts, 0); // Count bottom-left points. 
num_pts = __popc(tile32.ballot(is_active && p.x < center.x && p.y < center.y)); warp_cnts[2] += tile32.shfl(num_pts, 0); // Count bottom-right points. num_pts = __popc(tile32.ballot(is_active && p.x >= center.x && p.y < center.y)); warp_cnts[3] += tile32.shfl(num_pts, 0); } if (tile32.thread_rank() == 0) { s_num_pts[0][warp_id] = warp_cnts[0]; s_num_pts[1][warp_id] = warp_cnts[1]; s_num_pts[2][warp_id] = warp_cnts[2]; s_num_pts[3][warp_id] = warp_cnts[3]; } // Make sure warps have finished counting. cg::sync(cta); // // 3- Scan the warps' results to know the "global" numbers. // // First 4 warps scan the numbers of points per child (inclusive scan). if (warp_id < 4) { int num_pts = tile32.thread_rank() < NUM_WARPS_PER_BLOCK ? s_num_pts[warp_id][tile32.thread_rank()] : 0; #pragma unroll for (int offset = 1 ; offset < NUM_WARPS_PER_BLOCK ; offset *= 2) { int n = tile32.shfl_up(num_pts, offset); if (tile32.thread_rank() >= offset) num_pts += n; } if (tile32.thread_rank() < NUM_WARPS_PER_BLOCK) s_num_pts[warp_id][tile32.thread_rank()] = num_pts; } cg::sync(cta); // Compute global offsets. if (warp_id == 0) { int sum = s_num_pts[0][NUM_WARPS_PER_BLOCK-1]; for (int row = 1 ; row < 4 ; ++row) { int tmp = s_num_pts[row][NUM_WARPS_PER_BLOCK-1]; cg::sync(tile32); if (tile32.thread_rank() < NUM_WARPS_PER_BLOCK) s_num_pts[row][tile32.thread_rank()] += sum; cg::sync(tile32); sum += tmp; } } cg::sync(cta); // Make the scan exclusive. int val = 0; if (threadIdx.x < 4*NUM_WARPS_PER_BLOCK) { val = threadIdx.x == 0 ? 0 : smem[threadIdx.x-1]; val += node.points_begin(); } cg::sync(cta); if (threadIdx.x < 4*NUM_WARPS_PER_BLOCK) { smem[threadIdx.x] = val; } cg::sync(cta); // // 4- Move points. // if (!(params.depth >= params.max_depth || num_points <= params.min_points_per_node)) { // Output points. Points &out_points = points[(params.point_selector+1) % 2]; warp_cnts[0] = s_num_pts[0][warp_id]; warp_cnts[1] = s_num_pts[1][warp_id]; warp_cnts[2] = s_num_pts[2][warp_id]; warp_cnts[3] = s_num_pts[3][warp_id]; const Points &in_points = points[params.point_selector]; // Reorder points. for (int range_it = range_begin + tile32.thread_rank(); tile32.any(range_it < range_end) ; range_it += warpSize) { // Is it still an active thread? bool is_active = range_it < range_end; // Load the coordinates of the point. float2 p = is_active ? in_points.get_point(range_it) : make_float2(0.0f, 0.0f); // Count top-left points. bool pred = is_active && p.x < center.x && p.y >= center.y; int vote = tile32.ballot(pred); int dest = warp_cnts[0] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[0] += tile32.shfl(__popc(vote), 0); // Count top-right points. pred = is_active && p.x >= center.x && p.y >= center.y; vote = tile32.ballot(pred); dest = warp_cnts[1] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[1] += tile32.shfl(__popc(vote), 0); // Count bottom-left points. pred = is_active && p.x < center.x && p.y < center.y; vote = tile32.ballot(pred); dest = warp_cnts[2] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[2] += tile32.shfl(__popc(vote), 0); // Count bottom-right points. 
pred = is_active && p.x >= center.x && p.y < center.y; vote = tile32.ballot(pred); dest = warp_cnts[3] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[3] += tile32.shfl(__popc(vote), 0); } } cg::sync(cta); if (tile32.thread_rank() == 0) { s_num_pts[0][warp_id] = warp_cnts[0]; s_num_pts[1][warp_id] = warp_cnts[1] ; s_num_pts[2][warp_id] = warp_cnts[2] ; s_num_pts[3][warp_id] = warp_cnts[3]; } cg::sync(cta); // // 5- Launch new blocks. // if (!(params.depth >= params.max_depth || num_points <= params.min_points_per_node)) { // The last thread launches new blocks. if (threadIdx.x == NUM_THREADS_PER_BLOCK-1 ) { // The children. Quadtree_node *children = &nodes[params.num_nodes_at_this_level - (node.id() & ~3)]; // The offsets of the children at their level. int child_offset = 4*node.id(); // Set IDs. children[child_offset+0].set_id(4*node.id()+0); children[child_offset+1].set_id(4*node.id()+1); children[child_offset+2].set_id(4*node.id()+2); children[child_offset+3].set_id(4*node.id()+3); const Bounding_box &bbox = node.bounding_box(); // Points of the bounding-box. const float2 &p_min = bbox.get_min(); const float2 &p_max = bbox.get_max(); // Set the bounding boxes of the children. children[child_offset+0].set_bounding_box(p_min.x , center.y, center.x, p_max.y); // Top-left. children[child_offset+1].set_bounding_box(center.x, center.y, p_max.x , p_max.y); // Top-right. children[child_offset+2].set_bounding_box(p_min.x , p_min.y , center.x, center.y); // Bottom-left. children[child_offset+3].set_bounding_box(center.x, p_min.y , p_max.x , center.y); // Bottom-right. // Set the ranges of the children. children[child_offset+0].set_range(node.points_begin(), s_num_pts[0][warp_id]); children[child_offset+1].set_range(s_num_pts[0][warp_id], s_num_pts[1][warp_id]); children[child_offset+2].set_range(s_num_pts[1][warp_id], s_num_pts[2][warp_id]); children[child_offset+3].set_range(s_num_pts[2][warp_id], s_num_pts[3][warp_id]); // Launch 4 children. hipLaunchKernelGGL(( build_quadtree_kernel<NUM_THREADS_PER_BLOCK>), dim3(4), dim3(NUM_THREADS_PER_BLOCK), 4 *NUM_WARPS_PER_BLOCK *sizeof(int), 0, &children[child_offset], points, Parameters(params, true)); } } } //////////////////////////////////////////////////////////////////////////////// // Make sure a Quadtree is properly defined. 
//////////////////////////////////////////////////////////////////////////////// bool check_quadtree(const Quadtree_node *nodes, int idx, int num_pts, Points *pts, Parameters params) { const Quadtree_node &node = nodes[idx]; int num_points = node.num_points(); if (!(params.depth == params.max_depth || num_points <= params.min_points_per_node)) { int num_points_in_children = 0; num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+0].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+1].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+2].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+3].num_points(); if (num_points_in_children != node.num_points()) return false; return check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+0, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+1, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+2, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+3, num_pts, pts, Parameters(params, true)); } const Bounding_box &bbox = node.bounding_box(); for (int it = node.points_begin() ; it < node.points_end() ; ++it) { if (it >= num_pts) return false; float2 p = pts->get_point(it); if (!bbox.contains(p)) return false; } return true; } //////////////////////////////////////////////////////////////////////////////// // Parallel random number generator. //////////////////////////////////////////////////////////////////////////////// struct Random_generator { int count; __host__ __device__ Random_generator() : count(0) {} __host__ __device__ unsigned int hash(unsigned int a) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } __host__ __device__ __forceinline__ thrust::tuple<float, float> operator()() { #ifdef __CUDA_ARCH__ unsigned seed = hash(blockIdx.x*blockDim.x + threadIdx.x + count); // thrust::generate may call operator() more than once per thread. // Hence, increment count by grid size to ensure uniqueness of seed count += blockDim.x * gridDim.x; #else unsigned seed = hash(0); #endif thrust::default_random_engine rng(seed); thrust::random::uniform_real_distribution<float> distrib; return thrust::make_tuple(distrib(rng), distrib(rng)); } }; //////////////////////////////////////////////////////////////////////////////// // Allocate GPU structs, launch kernel and clean up //////////////////////////////////////////////////////////////////////////////// bool cdpQuadtree(int warp_size) { // Constants to control the algorithm. const int num_points = 1024; const int max_depth = 8; const int min_points_per_node = 16; // Allocate memory for points. thrust::device_vector<float> x_d0(num_points); thrust::device_vector<float> x_d1(num_points); thrust::device_vector<float> y_d0(num_points); thrust::device_vector<float> y_d1(num_points); // Generate random points. Random_generator rnd; thrust::generate( thrust::make_zip_iterator(thrust::make_tuple(x_d0.begin(), y_d0.begin())), thrust::make_zip_iterator(thrust::make_tuple(x_d0.end(), y_d0.end())), rnd); // Host structures to analyze the device ones. 
Points points_init[2]; points_init[0].set(thrust::raw_pointer_cast(&x_d0[0]), thrust::raw_pointer_cast(&y_d0[0])); points_init[1].set(thrust::raw_pointer_cast(&x_d1[0]), thrust::raw_pointer_cast(&y_d1[0])); // Allocate memory to store points. Points *points; checkCudaErrors(hipMalloc((void **) &points, 2*sizeof(Points))); checkCudaErrors(hipMemcpy(points, points_init, 2*sizeof(Points), hipMemcpyHostToDevice)); // We could use a close form... int max_nodes = 0; for (int i = 0, num_nodes_at_level = 1 ; i < max_depth ; ++i, num_nodes_at_level *= 4) max_nodes += num_nodes_at_level; // Allocate memory to store the tree. Quadtree_node root; root.set_range(0, num_points); Quadtree_node *nodes; checkCudaErrors(hipMalloc((void **) &nodes, max_nodes*sizeof(Quadtree_node))); checkCudaErrors(hipMemcpy(nodes, &root, sizeof(Quadtree_node), hipMemcpyHostToDevice)); // We set the recursion limit for CDP to max_depth. hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, max_depth); // Build the quadtree. Parameters params(max_depth, min_points_per_node); std::cout << "Launching CDP kernel to build the quadtree" << std::endl; const int NUM_THREADS_PER_BLOCK = 128; // Do not use less than 128 threads. const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warp_size; const size_t smem_size = 4*NUM_WARPS_PER_BLOCK*sizeof(int); hipLaunchKernelGGL(( build_quadtree_kernel<NUM_THREADS_PER_BLOCK>), dim3(1), dim3(NUM_THREADS_PER_BLOCK), smem_size, 0, nodes, points, params); checkCudaErrors(hipGetLastError()); // Copy points to CPU. thrust::host_vector<float> x_h(x_d0); thrust::host_vector<float> y_h(y_d0); Points host_points; host_points.set(thrust::raw_pointer_cast(&x_h[0]), thrust::raw_pointer_cast(&y_h[0])); // Copy nodes to CPU. Quadtree_node *host_nodes = new Quadtree_node[max_nodes]; checkCudaErrors(hipMemcpy(host_nodes, nodes, max_nodes *sizeof(Quadtree_node), hipMemcpyDeviceToHost)); // Validate the results. bool ok = check_quadtree(host_nodes, 0, num_points, &host_points, params); std::cout << "Results: " << (ok ? "OK" : "FAILED") << std::endl; // Free CPU memory. delete[] host_nodes; // Free memory. checkCudaErrors(hipFree(nodes)); checkCudaErrors(hipFree(points)); return ok; } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Find/set the device. // The test requires an architecture SM35 or greater (CDP capable). int cuda_device = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProps; checkCudaErrors(hipGetDeviceProperties(&deviceProps, cuda_device)); int cdpCapable = (deviceProps.major == 3 && deviceProps.minor >= 5) || deviceProps.major >=4; printf("GPU device %s has compute capabilities (SM %d.%d)\n", deviceProps.name, deviceProps.major, deviceProps.minor); if (!cdpCapable) { std::cerr << "cdpQuadTree requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...\n" << std::endl; exit(EXIT_WAIVED); } bool ok = cdpQuadtree(deviceProps.warpSize); return (ok ? EXIT_SUCCESS : EXIT_FAILURE); }
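// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original sample: the warp-level
// compaction arithmetic used in step 4 of build_quadtree_kernel, isolated into
// a hypothetical helper. Each lane whose predicate holds writes to
// base + (number of lower lanes whose predicate also holds); the ballot mask
// supplies the per-lane votes and lane_mask_lt = (1 << lane_id) - 1 selects
// the lower lanes.
__device__ __forceinline__ int compaction_dest(cg::thread_block_tile<32> tile32,
                                               bool pred, int base, int lane_mask_lt)
{
    int vote = tile32.ballot(pred);            // bit i set iff lane i voted true
    return base + __popc(vote & lane_mask_lt); // rank of this lane among the voters
}
// Example: votes 0b0110 with base 10 send lane 1 to slot 10 and lane 2 to
// slot 11, and the next base becomes 10 + __popc(0b0110) = 12.
// ---------------------------------------------------------------------------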
e097f9a65ce8418775aaaaefeda1e7ea54d07922.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <thrust/random.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cooperative_groups.h> namespace cg = cooperative_groups; #include <helper_cuda.h> //////////////////////////////////////////////////////////////////////////////// // A structure of 2D points (structure of arrays). //////////////////////////////////////////////////////////////////////////////// class Points { float *m_x; float *m_y; public: // Constructor. __host__ __device__ Points() : m_x(NULL), m_y(NULL) {} // Constructor. __host__ __device__ Points(float *x, float *y) : m_x(x), m_y(y) {} // Get a point. __host__ __device__ __forceinline__ float2 get_point(int idx) const { return make_float2(m_x[idx], m_y[idx]); } // Set a point. __host__ __device__ __forceinline__ void set_point(int idx, const float2 &p) { m_x[idx] = p.x; m_y[idx] = p.y; } // Set the pointers. __host__ __device__ __forceinline__ void set(float *x, float *y) { m_x = x; m_y = y; } }; //////////////////////////////////////////////////////////////////////////////// // A 2D bounding box //////////////////////////////////////////////////////////////////////////////// class Bounding_box { // Extreme points of the bounding box. float2 m_p_min; float2 m_p_max; public: // Constructor. Create a unit box. __host__ __device__ Bounding_box() { m_p_min = make_float2(0.0f, 0.0f); m_p_max = make_float2(1.0f, 1.0f); } // Compute the center of the bounding-box. __host__ __device__ void compute_center(float2 &center) const { center.x = 0.5f * (m_p_min.x + m_p_max.x); center.y = 0.5f * (m_p_min.y + m_p_max.y); } // The points of the box. __host__ __device__ __forceinline__ const float2 &get_max() const { return m_p_max; } __host__ __device__ __forceinline__ const float2 &get_min() const { return m_p_min; } // Does a box contain a point. __host__ __device__ bool contains(const float2 &p) const { return p.x >= m_p_min.x && p.x < m_p_max.x && p.y >= m_p_min.y && p.y < m_p_max.y; } // Define the bounding box. __host__ __device__ void set(float min_x, float min_y, float max_x, float max_y) { m_p_min.x = min_x; m_p_min.y = min_y; m_p_max.x = max_x; m_p_max.y = max_y; } }; //////////////////////////////////////////////////////////////////////////////// // A node of a quadree. //////////////////////////////////////////////////////////////////////////////// class Quadtree_node { // The identifier of the node. int m_id; // The bounding box of the tree. Bounding_box m_bounding_box; // The range of points. int m_begin, m_end; public: // Constructor. __host__ __device__ Quadtree_node() : m_id(0), m_begin(0), m_end(0) {} // The ID of a node at its level. __host__ __device__ int id() const { return m_id; } // The ID of a node at its level. __host__ __device__ void set_id(int new_id) { m_id = new_id; } // The bounding box. __host__ __device__ __forceinline__ const Bounding_box &bounding_box() const { return m_bounding_box; } // Set the bounding box. __host__ __device__ __forceinline__ void set_bounding_box(float min_x, float min_y, float max_x, float max_y) { m_bounding_box.set(min_x, min_y, max_x, max_y); } // The number of points in the tree. 
__host__ __device__ __forceinline__ int num_points() const { return m_end - m_begin; } // The range of points in the tree. __host__ __device__ __forceinline__ int points_begin() const { return m_begin; } __host__ __device__ __forceinline__ int points_end() const { return m_end; } // Define the range for that node. __host__ __device__ __forceinline__ void set_range(int begin, int end) { m_begin = begin; m_end = end; } }; //////////////////////////////////////////////////////////////////////////////// // Algorithm parameters. //////////////////////////////////////////////////////////////////////////////// struct Parameters { // Choose the right set of points to use as in/out. int point_selector; // The number of nodes at a given level (2^k for level k). int num_nodes_at_this_level; // The recursion depth. int depth; // The max value for depth. const int max_depth; // The minimum number of points in a node to stop recursion. const int min_points_per_node; // Constructor set to default values. __host__ __device__ Parameters(int max_depth, int min_points_per_node) : point_selector(0), num_nodes_at_this_level(1), depth(0), max_depth(max_depth), min_points_per_node(min_points_per_node) {} // Copy constructor. Changes the values for next iteration. __host__ __device__ Parameters(const Parameters &params, bool) : point_selector((params.point_selector+1) % 2), num_nodes_at_this_level(4*params.num_nodes_at_this_level), depth(params.depth+1), max_depth(params.max_depth), min_points_per_node(params.min_points_per_node) {} }; //////////////////////////////////////////////////////////////////////////////// // Build a quadtree on the GPU. Use CUDA Dynamic Parallelism. // // The algorithm works as follows. The host (CPU) launches one block of // NUM_THREADS_PER_BLOCK threads. That block will do the following steps: // // 1- Check the number of points and its depth. // // We impose a maximum depth to the tree and a minimum number of points per // node. If the maximum depth is exceeded or the minimum number of points is // reached. The threads in the block exit. // // Before exiting, they perform a buffer swap if it is needed. Indeed, the // algorithm uses two buffers to permute the points and make sure they are // properly distributed in the quadtree. By design we want all points to be // in the first buffer of points at the end of the algorithm. It is the reason // why we may have to swap the buffer before leavin (if the points are in the // 2nd buffer). // // 2- Count the number of points in each child. // // If the depth is not too high and the number of points is sufficient, the // block has to dispatch the points into four geometrical buckets: Its // children. For that purpose, we compute the center of the bounding box and // count the number of points in each quadrant. // // The set of points is divided into sections. Each section is given to a // warp of threads (32 threads). Warps use __ballot and __popc intrinsics // to count the points. See the Programming Guide for more information about // those functions. // // 3- Scan the warps' results to know the "global" numbers. // // Warps work independently from each other. At the end, each warp knows the // number of points in its section. To know the numbers for the block, the // block has to run a scan/reduce at the block level. It's a traditional // approach. The implementation in that sample is not as optimized as what // could be found in fast radix sorts, for example, but it relies on the same // idea. // // 4- Move points. 
// // Now that the block knows how many points go in each of its 4 children, it // remains to dispatch the points. It is straightforward. // // 5- Launch new blocks. // // The block launches four new blocks: One per children. Each of the four blocks // will apply the same algorithm. //////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_BLOCK > __global__ void build_quadtree_kernel(Quadtree_node *nodes, Points *points, Parameters params) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // The number of warps in a block. const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warpSize; // Shared memory to store the number of points. extern __shared__ int smem[]; // s_num_pts[4][NUM_WARPS_PER_BLOCK]; // Addresses of shared memory. volatile int *s_num_pts[4]; for (int i = 0 ; i < 4 ; ++i) s_num_pts[i] = (volatile int *) &smem[i*NUM_WARPS_PER_BLOCK]; // Compute the coordinates of the threads in the block. const int warp_id = threadIdx.x / warpSize; const int lane_id = threadIdx.x % warpSize; // Mask for compaction. int lane_mask_lt = (1 << lane_id) - 1; // Same as: asm( "mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt) ); // The current node. Quadtree_node &node = nodes[blockIdx.x]; // The number of points in the node. int num_points = node.num_points(); float2 center; int range_begin, range_end; int warp_cnts[4] = {0, 0, 0, 0}; // // 1- Check the number of points and its depth. // // Stop the recursion here. Make sure points[0] contains all the points. if (params.depth >= params.max_depth || num_points <= params.min_points_per_node) { if (params.point_selector == 1) { int it = node.points_begin(), end = node.points_end(); for (it += threadIdx.x ; it < end ; it += NUM_THREADS_PER_BLOCK) if (it < end) points[0].set_point(it, points[1].get_point(it)); } return; } // Compute the center of the bounding box of the points. const Bounding_box &bbox = node.bounding_box(); bbox.compute_center(center); // Find how many points to give to each warp. int num_points_per_warp = max(warpSize, (num_points + NUM_WARPS_PER_BLOCK-1) / NUM_WARPS_PER_BLOCK); // Each warp of threads will compute the number of points to move to each quadrant. range_begin = node.points_begin() + warp_id * num_points_per_warp; range_end = min(range_begin + num_points_per_warp, node.points_end()); // // 2- Count the number of points in each child. // // Input points. const Points &in_points = points[params.point_selector]; cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); // Compute the number of points. for (int range_it = range_begin + tile32.thread_rank() ; tile32.any(range_it < range_end) ; range_it += warpSize) { // Is it still an active thread? bool is_active = range_it < range_end; // Load the coordinates of the point. float2 p = is_active ? in_points.get_point(range_it) : make_float2(0.0f, 0.0f); // Count top-left points. int num_pts = __popc(tile32.ballot(is_active && p.x < center.x && p.y >= center.y)); warp_cnts[0] += tile32.shfl(num_pts, 0); // Count top-right points. num_pts = __popc(tile32.ballot(is_active && p.x >= center.x && p.y >= center.y)); warp_cnts[1] += tile32.shfl(num_pts, 0); // Count bottom-left points. num_pts = __popc(tile32.ballot(is_active && p.x < center.x && p.y < center.y)); warp_cnts[2] += tile32.shfl(num_pts, 0); // Count bottom-right points. 
num_pts = __popc(tile32.ballot(is_active && p.x >= center.x && p.y < center.y)); warp_cnts[3] += tile32.shfl(num_pts, 0); } if (tile32.thread_rank() == 0) { s_num_pts[0][warp_id] = warp_cnts[0]; s_num_pts[1][warp_id] = warp_cnts[1]; s_num_pts[2][warp_id] = warp_cnts[2]; s_num_pts[3][warp_id] = warp_cnts[3]; } // Make sure warps have finished counting. cg::sync(cta); // // 3- Scan the warps' results to know the "global" numbers. // // First 4 warps scan the numbers of points per child (inclusive scan). if (warp_id < 4) { int num_pts = tile32.thread_rank() < NUM_WARPS_PER_BLOCK ? s_num_pts[warp_id][tile32.thread_rank()] : 0; #pragma unroll for (int offset = 1 ; offset < NUM_WARPS_PER_BLOCK ; offset *= 2) { int n = tile32.shfl_up(num_pts, offset); if (tile32.thread_rank() >= offset) num_pts += n; } if (tile32.thread_rank() < NUM_WARPS_PER_BLOCK) s_num_pts[warp_id][tile32.thread_rank()] = num_pts; } cg::sync(cta); // Compute global offsets. if (warp_id == 0) { int sum = s_num_pts[0][NUM_WARPS_PER_BLOCK-1]; for (int row = 1 ; row < 4 ; ++row) { int tmp = s_num_pts[row][NUM_WARPS_PER_BLOCK-1]; cg::sync(tile32); if (tile32.thread_rank() < NUM_WARPS_PER_BLOCK) s_num_pts[row][tile32.thread_rank()] += sum; cg::sync(tile32); sum += tmp; } } cg::sync(cta); // Make the scan exclusive. int val = 0; if (threadIdx.x < 4*NUM_WARPS_PER_BLOCK) { val = threadIdx.x == 0 ? 0 : smem[threadIdx.x-1]; val += node.points_begin(); } cg::sync(cta); if (threadIdx.x < 4*NUM_WARPS_PER_BLOCK) { smem[threadIdx.x] = val; } cg::sync(cta); // // 4- Move points. // if (!(params.depth >= params.max_depth || num_points <= params.min_points_per_node)) { // Output points. Points &out_points = points[(params.point_selector+1) % 2]; warp_cnts[0] = s_num_pts[0][warp_id]; warp_cnts[1] = s_num_pts[1][warp_id]; warp_cnts[2] = s_num_pts[2][warp_id]; warp_cnts[3] = s_num_pts[3][warp_id]; const Points &in_points = points[params.point_selector]; // Reorder points. for (int range_it = range_begin + tile32.thread_rank(); tile32.any(range_it < range_end) ; range_it += warpSize) { // Is it still an active thread? bool is_active = range_it < range_end; // Load the coordinates of the point. float2 p = is_active ? in_points.get_point(range_it) : make_float2(0.0f, 0.0f); // Count top-left points. bool pred = is_active && p.x < center.x && p.y >= center.y; int vote = tile32.ballot(pred); int dest = warp_cnts[0] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[0] += tile32.shfl(__popc(vote), 0); // Count top-right points. pred = is_active && p.x >= center.x && p.y >= center.y; vote = tile32.ballot(pred); dest = warp_cnts[1] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[1] += tile32.shfl(__popc(vote), 0); // Count bottom-left points. pred = is_active && p.x < center.x && p.y < center.y; vote = tile32.ballot(pred); dest = warp_cnts[2] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[2] += tile32.shfl(__popc(vote), 0); // Count bottom-right points. pred = is_active && p.x >= center.x && p.y < center.y; vote = tile32.ballot(pred); dest = warp_cnts[3] + __popc(vote & lane_mask_lt); if (pred) out_points.set_point(dest, p); warp_cnts[3] += tile32.shfl(__popc(vote), 0); } } cg::sync(cta); if (tile32.thread_rank() == 0) { s_num_pts[0][warp_id] = warp_cnts[0]; s_num_pts[1][warp_id] = warp_cnts[1] ; s_num_pts[2][warp_id] = warp_cnts[2] ; s_num_pts[3][warp_id] = warp_cnts[3]; } cg::sync(cta); // // 5- Launch new blocks. 
// if (!(params.depth >= params.max_depth || num_points <= params.min_points_per_node)) { // The last thread launches new blocks. if (threadIdx.x == NUM_THREADS_PER_BLOCK-1 ) { // The children. Quadtree_node *children = &nodes[params.num_nodes_at_this_level - (node.id() & ~3)]; // The offsets of the children at their level. int child_offset = 4*node.id(); // Set IDs. children[child_offset+0].set_id(4*node.id()+0); children[child_offset+1].set_id(4*node.id()+1); children[child_offset+2].set_id(4*node.id()+2); children[child_offset+3].set_id(4*node.id()+3); const Bounding_box &bbox = node.bounding_box(); // Points of the bounding-box. const float2 &p_min = bbox.get_min(); const float2 &p_max = bbox.get_max(); // Set the bounding boxes of the children. children[child_offset+0].set_bounding_box(p_min.x , center.y, center.x, p_max.y); // Top-left. children[child_offset+1].set_bounding_box(center.x, center.y, p_max.x , p_max.y); // Top-right. children[child_offset+2].set_bounding_box(p_min.x , p_min.y , center.x, center.y); // Bottom-left. children[child_offset+3].set_bounding_box(center.x, p_min.y , p_max.x , center.y); // Bottom-right. // Set the ranges of the children. children[child_offset+0].set_range(node.points_begin(), s_num_pts[0][warp_id]); children[child_offset+1].set_range(s_num_pts[0][warp_id], s_num_pts[1][warp_id]); children[child_offset+2].set_range(s_num_pts[1][warp_id], s_num_pts[2][warp_id]); children[child_offset+3].set_range(s_num_pts[2][warp_id], s_num_pts[3][warp_id]); // Launch 4 children. build_quadtree_kernel<NUM_THREADS_PER_BLOCK><<<4, NUM_THREADS_PER_BLOCK, 4 *NUM_WARPS_PER_BLOCK *sizeof(int)>>>(&children[child_offset], points, Parameters(params, true)); } } } //////////////////////////////////////////////////////////////////////////////// // Make sure a Quadtree is properly defined. //////////////////////////////////////////////////////////////////////////////// bool check_quadtree(const Quadtree_node *nodes, int idx, int num_pts, Points *pts, Parameters params) { const Quadtree_node &node = nodes[idx]; int num_points = node.num_points(); if (!(params.depth == params.max_depth || num_points <= params.min_points_per_node)) { int num_points_in_children = 0; num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+0].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+1].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+2].num_points(); num_points_in_children += nodes[params.num_nodes_at_this_level + 4*idx+3].num_points(); if (num_points_in_children != node.num_points()) return false; return check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+0, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+1, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+2, num_pts, pts, Parameters(params, true)) && check_quadtree(&nodes[params.num_nodes_at_this_level], 4*idx+3, num_pts, pts, Parameters(params, true)); } const Bounding_box &bbox = node.bounding_box(); for (int it = node.points_begin() ; it < node.points_end() ; ++it) { if (it >= num_pts) return false; float2 p = pts->get_point(it); if (!bbox.contains(p)) return false; } return true; } //////////////////////////////////////////////////////////////////////////////// // Parallel random number generator. 
//////////////////////////////////////////////////////////////////////////////// struct Random_generator { int count; __host__ __device__ Random_generator() : count(0) {} __host__ __device__ unsigned int hash(unsigned int a) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } __host__ __device__ __forceinline__ thrust::tuple<float, float> operator()() { #ifdef __CUDA_ARCH__ unsigned seed = hash(blockIdx.x*blockDim.x + threadIdx.x + count); // thrust::generate may call operator() more than once per thread. // Hence, increment count by grid size to ensure uniqueness of seed count += blockDim.x * gridDim.x; #else unsigned seed = hash(0); #endif thrust::default_random_engine rng(seed); thrust::random::uniform_real_distribution<float> distrib; return thrust::make_tuple(distrib(rng), distrib(rng)); } }; //////////////////////////////////////////////////////////////////////////////// // Allocate GPU structs, launch kernel and clean up //////////////////////////////////////////////////////////////////////////////// bool cdpQuadtree(int warp_size) { // Constants to control the algorithm. const int num_points = 1024; const int max_depth = 8; const int min_points_per_node = 16; // Allocate memory for points. thrust::device_vector<float> x_d0(num_points); thrust::device_vector<float> x_d1(num_points); thrust::device_vector<float> y_d0(num_points); thrust::device_vector<float> y_d1(num_points); // Generate random points. Random_generator rnd; thrust::generate( thrust::make_zip_iterator(thrust::make_tuple(x_d0.begin(), y_d0.begin())), thrust::make_zip_iterator(thrust::make_tuple(x_d0.end(), y_d0.end())), rnd); // Host structures to analyze the device ones. Points points_init[2]; points_init[0].set(thrust::raw_pointer_cast(&x_d0[0]), thrust::raw_pointer_cast(&y_d0[0])); points_init[1].set(thrust::raw_pointer_cast(&x_d1[0]), thrust::raw_pointer_cast(&y_d1[0])); // Allocate memory to store points. Points *points; checkCudaErrors(cudaMalloc((void **) &points, 2*sizeof(Points))); checkCudaErrors(cudaMemcpy(points, points_init, 2*sizeof(Points), cudaMemcpyHostToDevice)); // We could use a close form... int max_nodes = 0; for (int i = 0, num_nodes_at_level = 1 ; i < max_depth ; ++i, num_nodes_at_level *= 4) max_nodes += num_nodes_at_level; // Allocate memory to store the tree. Quadtree_node root; root.set_range(0, num_points); Quadtree_node *nodes; checkCudaErrors(cudaMalloc((void **) &nodes, max_nodes*sizeof(Quadtree_node))); checkCudaErrors(cudaMemcpy(nodes, &root, sizeof(Quadtree_node), cudaMemcpyHostToDevice)); // We set the recursion limit for CDP to max_depth. cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth); // Build the quadtree. Parameters params(max_depth, min_points_per_node); std::cout << "Launching CDP kernel to build the quadtree" << std::endl; const int NUM_THREADS_PER_BLOCK = 128; // Do not use less than 128 threads. const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / warp_size; const size_t smem_size = 4*NUM_WARPS_PER_BLOCK*sizeof(int); build_quadtree_kernel<NUM_THREADS_PER_BLOCK><<<1, NUM_THREADS_PER_BLOCK, smem_size>>>(nodes, points, params); checkCudaErrors(cudaGetLastError()); // Copy points to CPU. thrust::host_vector<float> x_h(x_d0); thrust::host_vector<float> y_h(y_d0); Points host_points; host_points.set(thrust::raw_pointer_cast(&x_h[0]), thrust::raw_pointer_cast(&y_h[0])); // Copy nodes to CPU. 
Quadtree_node *host_nodes = new Quadtree_node[max_nodes]; checkCudaErrors(cudaMemcpy(host_nodes, nodes, max_nodes *sizeof(Quadtree_node), cudaMemcpyDeviceToHost)); // Validate the results. bool ok = check_quadtree(host_nodes, 0, num_points, &host_points, params); std::cout << "Results: " << (ok ? "OK" : "FAILED") << std::endl; // Free CPU memory. delete[] host_nodes; // Free memory. checkCudaErrors(cudaFree(nodes)); checkCudaErrors(cudaFree(points)); return ok; } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { // Find/set the device. // The test requires an architecture SM35 or greater (CDP capable). int cuda_device = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProps; checkCudaErrors(cudaGetDeviceProperties(&deviceProps, cuda_device)); int cdpCapable = (deviceProps.major == 3 && deviceProps.minor >= 5) || deviceProps.major >=4; printf("GPU device %s has compute capabilities (SM %d.%d)\n", deviceProps.name, deviceProps.major, deviceProps.minor); if (!cdpCapable) { std::cerr << "cdpQuadTree requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...\n" << std::endl; exit(EXIT_WAIVED); } bool ok = cdpQuadtree(deviceProps.warpSize); return (ok ? EXIT_SUCCESS : EXIT_FAILURE); }
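// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original sample: the closed form that
// the "We could use a close form..." comment in cdpQuadtree alludes to. The
// node count over max_depth levels is a geometric series:
//   sum_{k=0}^{max_depth-1} 4^k = (4^max_depth - 1) / 3.
static inline int max_nodes_closed_form(int max_depth)
{
    return ((1 << (2 * max_depth)) - 1) / 3;  // 4^d == 1 << (2*d)
}
// For max_depth = 8 this yields (65536 - 1) / 3 = 21845, matching the loop above.
// ---------------------------------------------------------------------------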
1262173e2525fe5ab83c4b25d75cd31e1d79d0c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include "stdio.h" /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ /* scan should be done in 2 levels: step 1: scan each individual block let the last thread of each block to store its value to a global_array block_sum[blockIdx.x] step 2: scan the block_sum array step 3: add to every element its block offset found in block_sum array for example output[gid] = input[gid] + block_sum[blockIdx.x] */ __global__ void simple_scan(const unsigned int * const input, unsigned int * const output, unsigned int * const sum, const size_t n) { __shared__ extern unsigned int data[]; size_t gid = 2* (blockIdx.x * blockDim.x + threadIdx.x); size_t tid = 2 * threadIdx.x; size_t offset = 1; //data[tid] = (gid < n) ? input[gid] : 0; //data[tid+1] = (gid + 1 < n) ? input[gid+1] : 0; data[tid] = (gid < n) ? input[gid] : 0; data[tid+1] = (gid + 1 < n) ? 
input[gid+1] : 0; // No early return here: every thread must reach the __syncthreads() calls in the loops below, and out-of-range lanes have already loaded zero padding. // problem here: changed blockDim.x>>2 to blockDim.x for (unsigned int d = blockDim.x; d > 0; d >>=1) { __syncthreads(); if(threadIdx.x < d){ int ai = offset * (tid +1) - 1; int bi = offset * (tid +2) - 1; data[bi] += data[ai]; } offset *=2; } // Save the block total for the second-level scan before clearing the last slot for the exclusive down-sweep. if (tid == 0) { sum[blockIdx.x] = data[2*blockDim.x - 1]; data[2*blockDim.x - 1] = 0; } // problem here changed blockDim.x to 2*blockDim.x for (int d = 1; d < 2*blockDim.x; d*=2) { offset >>= 1; __syncthreads(); if(threadIdx.x < d){ int ai = offset * (tid +1) - 1; int bi = offset * (tid +2) - 1; unsigned int t = data[ai]; data[ai] = data[bi]; data[bi] += t; } } __syncthreads(); if(gid < n) output[gid] = data[tid]; if(gid + 1 < n) output[gid+1] = data[tid+1]; } __global__ void apply_predicate(const unsigned int * const input, unsigned int * const predicate, const size_t numElems, const unsigned int sb) { size_t gid = blockIdx.x * blockDim.x + threadIdx.x; //size_t id = threadIdx.x; if(gid >= numElems) return; // >= rather than >: index numElems is one past the end predicate[gid] = (input[gid] & sb) == 0; } __global__ void change_positions(const unsigned int* const inputVals, const unsigned int* const inputPos, unsigned int* const outputVals, unsigned int* const outputPos, const unsigned int* const scan0, const unsigned int* const scan1, const unsigned int* const predicate, const size_t numElems, const unsigned int* sum) { size_t gid = blockIdx.x * blockDim.x + threadIdx.x; //size_t id = threadIdx.x; // offset should be the total count of zero-bit elements (sum[0] once the reduction in your_sort is enabled); it stays 0 while that step is commented out. //size_t offset = sum[0]; size_t offset = 0; if(gid >= numElems) return; if(predicate[gid] == 1){ outputPos[scan0[gid]] = inputPos[gid]; outputVals[scan0[gid]] = inputVals[gid]; }else if(predicate[gid] == 0){ //if(scan1[gid] + offset > numElems) // printf("This is bad\n"); outputPos[scan1[gid] + offset] = inputPos[gid]; outputVals[scan1[gid] + offset] = inputVals[gid]; }else{ printf("Ooops\n"); } } // finalize this reduction __global__ void reduce_sum(const unsigned int* const input, unsigned int * sum, const size_t numElems) { __shared__ extern unsigned int data[]; size_t gid = blockIdx.x * 2 * blockDim.x + threadIdx.x; size_t id = threadIdx.x; //if(gid +blockDim.x > numElems) return; data[id] = gid < numElems ?
input[gid] : 0; if(gid + blockDim.x < numElems) data[id] += input[gid + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x/2; s > 0; s>>=1){ if(id < s){ data[id] += data[id + s]; } __syncthreads(); } if (id == 0){ sum[blockIdx.x] = data[id]; //printf("Local Partial sum is %u and global is %u\n",data[id], sum[blockIdx.x]); } } //Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays __global__ void uniformUpdate(unsigned int *d_Data, unsigned int *d_Buffer) { __shared__ uint buf; uint pos = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.x == 0) { buf = d_Buffer[blockIdx.x]; } __syncthreads(); uint data4 = d_Data[pos]; data4 += buf; //data4.x += buf; //data4.y += buf; //data4.z += buf; //data4.w += buf; d_Data[pos] = data4; } __global__ void flip_array(unsigned int* const input, const size_t numElems) { // Plain 1:1 indexing (no 2x stride): a <<<blocks, threads>>> launch must visit every element exactly once. size_t gid = blockIdx.x * blockDim.x + threadIdx.x; //size_t id = threadIdx.x; if(gid >= numElems) return; input[gid] = !input[gid]; } void your_sort(unsigned int* d_inputVals, unsigned int* d_inputPos, unsigned int* d_outputVals, unsigned int* d_outputPos, const size_t numElems) { // Serial Part unsigned int * h_scan0 = new unsigned int[numElems]; unsigned int * h_sum = new unsigned int[numElems]; printf("numElems = %lu\n", numElems); size_t threads = 1024; size_t blocks = (numElems + threads -1) / threads; unsigned int * d_predicate; unsigned int * d_scan0; unsigned int * d_scan1; unsigned int * d_sum1; checkCudaErrors(hipMalloc((void **) &d_predicate, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMalloc((void **) &d_scan0, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMalloc((void **) &d_scan1, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMalloc((void **) &d_sum1, sizeof(unsigned int) * numElems)); checkCudaErrors(hipMemset(d_sum1, 0, sizeof(unsigned int) * numElems)); unsigned int sb = 1<<0; size_t memoryBytes = threads * sizeof(unsigned int); //for (size_t i = 0; i < 32; ++i){ while(/*sb < (1<<31)*/ sb < (1<<1) ){ hipLaunchKernelGGL(( apply_predicate), dim3(blocks), dim3(threads), 0, 0, d_inputVals, d_predicate, numElems, sb); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( simple_scan), dim3(blocks), dim3(threads/2), memoryBytes, 0, d_predicate, d_scan0, d_sum1, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // TODO simple_scan works for arrays of size k * (threads/2); check what needs to be done (padding) for other sizes /// Serial Part //checkCudaErrors(hipMemcpy(h_scan0, d_predicate, sizeof(unsigned int) * numElems, hipMemcpyDeviceToHost)); //for (int i = 0; i < 10; ++i) { // printf("predicate[%d] = %u\n", i, h_scan0[i]); //} //h_scan0[0] = 0; //for (int i = 1; i < 10; ++i) { // h_scan0[i] += h_scan0[i-1]; //if((i-1) % 1024 == 0) //printf("host scan[%d] = %u\n", i-1, h_scan0[i-1]); //} //for (int i = 0; i < 10; ++i) { // printf("host scan[%d] = %u\n", i, h_scan0[i]); //} //for (int i = 0; i < numElems; ++i) { // h_scan0[i] = 0; //} //memset(h_scan0, 0, sizeof(unsigned int) * numElems); //checkCudaErrors(hipMemcpy(h_scan0, d_scan0, sizeof(unsigned int) * numElems, hipMemcpyDeviceToHost)); //for (int i = 0; i < 10; ++i) { //if(i % 1024 == 0) // printf("Device scan[%d] = %u\n",i, h_scan0[i]); //} //printf("Host sum = %u\n",h_sum[0]); /// End of Serial part /* size_t elems = numElems; //size_t bytes = threads * sizeof(unsigned int); size_t numBlocks = (elems + 2*threads-1)/(2*threads); reduce_sum<<<numBlocks, 
threads, memoryBytes>>>(d_predicate, d_sum1, elems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); while(numBlocks > 1){ elems = numBlocks; numBlocks = (elems + 2*threads-1)/(2*threads); reduce_sum<<<numBlocks, threads, memoryBytes>>>(d_sum1, d_sum1, elems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } // @d_sum1[0] should be the final sum // Serial Part checkCudaErrors(hipMemcpy(h_sum, d_sum1, sizeof(unsigned int), hipMemcpyDeviceToHost)); printf("Device sum = %u\n", h_sum[0]); // End of serial part flip_array<<<blocks, threads>>>(d_predicate, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); simple_scan<<<blocks, threads, memoryBytes>>>(d_predicate, d_scan1, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); change_positions<<<blocks, threads>>>(d_inputVals, d_inputPos, d_outputVals, d_outputPos, d_scan0, d_scan1, d_predicate, numElems, d_sum1); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); */ std::swap(d_inputPos, d_outputPos); std::swap(d_inputVals, d_outputVals); sb = sb << 1; std::cout << "end of iteration no " << sb <<'\n'; } //std::swap(d_inputPos, d_outputPos); // std::swap(d_inputVals, d_outputVals); std::cout << "its over\n"; }
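// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original file: one way to finish the
// two-level scan described in the comment at the top. This sketch assumes
// `blocks` is a power of two in [2, 2048] so a single simple_scan launch can
// scan all the block totals, and that numElems is a multiple of `threads`
// (pad otherwise, or uniformUpdate writes past the end). d_block_offsets and
// d_total are hypothetical scratch buffers allocated by the caller.
void finish_two_level_scan(unsigned int *d_scan, unsigned int *d_block_sums,
                           unsigned int *d_block_offsets, unsigned int *d_total,
                           size_t blocks, size_t threads)
{
    // Step 2: exclusive-scan the per-block totals with one block
    // (blocks/2 threads, two elements per thread, as in simple_scan).
    hipLaunchKernelGGL(simple_scan, dim3(1), dim3(blocks / 2),
                       blocks * sizeof(unsigned int), 0,
                       d_block_sums, d_block_offsets, d_total, blocks);
    // Step 3: add each block's scanned offset to every element of that block
    // (uniformUpdate block b covers the `threads` elements scan block b wrote).
    hipLaunchKernelGGL(uniformUpdate, dim3(blocks), dim3(threads), 0, 0,
                       d_scan, d_block_offsets);
}
// ---------------------------------------------------------------------------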
1262173e2525fe5ab83c4b25d75cd31e1d79d0c1.cu
//Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> #include "stdio.h" /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ /* scan should be done in 2 levels: step 1: scan each individual block let the last thread of each block to store its value to a global_array block_sum[blockIdx.x] step 2: scan the block_sum array step 3: add to every element its block offset found in block_sum array for example output[gid] = input[gid] + block_sum[blockIdx.x] */ __global__ void simple_scan(const unsigned int * const input, unsigned int * const output, unsigned int * const sum, const size_t n) { __shared__ extern unsigned int data[]; size_t gid = 2* (blockIdx.x * blockDim.x + threadIdx.x); size_t tid = 2 * threadIdx.x; size_t offset = 1; //data[tid] = (gid < n) ? input[gid] : 0; //data[tid+1] = (gid + 1 < n) ? input[gid+1] : 0; data[tid] = (gid < n) ? input[gid] : 0; data[tid+1] = (gid + 1 < n) ? 
input[gid+1] : 0; // No early return here: every thread must reach the __syncthreads() calls in the loops below, and out-of-range lanes have already loaded zero padding. // problem here: changed blockDim.x>>2 to blockDim.x for (unsigned int d = blockDim.x; d > 0; d >>=1) { __syncthreads(); if(threadIdx.x < d){ int ai = offset * (tid +1) - 1; int bi = offset * (tid +2) - 1; data[bi] += data[ai]; } offset *=2; } // Save the block total for the second-level scan before clearing the last slot for the exclusive down-sweep. if (tid == 0) { sum[blockIdx.x] = data[2*blockDim.x - 1]; data[2*blockDim.x - 1] = 0; } // problem here changed blockDim.x to 2*blockDim.x for (int d = 1; d < 2*blockDim.x; d*=2) { offset >>= 1; __syncthreads(); if(threadIdx.x < d){ int ai = offset * (tid +1) - 1; int bi = offset * (tid +2) - 1; unsigned int t = data[ai]; data[ai] = data[bi]; data[bi] += t; } } __syncthreads(); if(gid < n) output[gid] = data[tid]; if(gid + 1 < n) output[gid+1] = data[tid+1]; } __global__ void apply_predicate(const unsigned int * const input, unsigned int * const predicate, const size_t numElems, const unsigned int sb) { size_t gid = blockIdx.x * blockDim.x + threadIdx.x; //size_t id = threadIdx.x; if(gid >= numElems) return; // >= rather than >: index numElems is one past the end predicate[gid] = (input[gid] & sb) == 0; } __global__ void change_positions(const unsigned int* const inputVals, const unsigned int* const inputPos, unsigned int* const outputVals, unsigned int* const outputPos, const unsigned int* const scan0, const unsigned int* const scan1, const unsigned int* const predicate, const size_t numElems, const unsigned int* sum) { size_t gid = blockIdx.x * blockDim.x + threadIdx.x; //size_t id = threadIdx.x; // offset should be the total count of zero-bit elements (sum[0] once the reduction in your_sort is enabled); it stays 0 while that step is commented out. //size_t offset = sum[0]; size_t offset = 0; if(gid >= numElems) return; if(predicate[gid] == 1){ outputPos[scan0[gid]] = inputPos[gid]; outputVals[scan0[gid]] = inputVals[gid]; }else if(predicate[gid] == 0){ //if(scan1[gid] + offset > numElems) // printf("This is bad\n"); outputPos[scan1[gid] + offset] = inputPos[gid]; outputVals[scan1[gid] + offset] = inputVals[gid]; }else{ printf("Ooops\n"); } } // finalize this reduction __global__ void reduce_sum(const unsigned int* const input, unsigned int * sum, const size_t numElems) { __shared__ extern unsigned int data[]; size_t gid = blockIdx.x * 2 * blockDim.x + threadIdx.x; size_t id = threadIdx.x; //if(gid +blockDim.x > numElems) return; data[id] = gid < numElems ?
input[gid] : 0; if(gid + blockDim.x < numElems) data[id] += input[gid + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x/2; s > 0; s>>=1){ if(id < s){ data[id] += data[id + s]; } __syncthreads(); } if (id == 0){ sum[blockIdx.x] = data[id]; //printf("Local Partial sum is %u and global is %u\n",data[id], sum[blockIdx.x]); } } //Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays __global__ void uniformUpdate(unsigned int *d_Data, unsigned int *d_Buffer) { __shared__ uint buf; uint pos = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.x == 0) { buf = d_Buffer[blockIdx.x]; } __syncthreads(); uint data4 = d_Data[pos]; data4 += buf; //data4.x += buf; //data4.y += buf; //data4.z += buf; //data4.w += buf; d_Data[pos] = data4; } __global__ void flip_array(unsigned int* const input, const size_t numElems) { // Plain 1:1 indexing (no 2x stride): a <<<blocks, threads>>> launch must visit every element exactly once. size_t gid = blockIdx.x * blockDim.x + threadIdx.x; //size_t id = threadIdx.x; if(gid >= numElems) return; input[gid] = !input[gid]; } void your_sort(unsigned int* d_inputVals, unsigned int* d_inputPos, unsigned int* d_outputVals, unsigned int* d_outputPos, const size_t numElems) { // Serial Part unsigned int * h_scan0 = new unsigned int[numElems]; unsigned int * h_sum = new unsigned int[numElems]; printf("numElems = %lu\n", numElems); size_t threads = 1024; size_t blocks = (numElems + threads -1) / threads; unsigned int * d_predicate; unsigned int * d_scan0; unsigned int * d_scan1; unsigned int * d_sum1; checkCudaErrors(cudaMalloc((void **) &d_predicate, sizeof(unsigned int) * numElems)); checkCudaErrors(cudaMalloc((void **) &d_scan0, sizeof(unsigned int) * numElems)); checkCudaErrors(cudaMalloc((void **) &d_scan1, sizeof(unsigned int) * numElems)); checkCudaErrors(cudaMalloc((void **) &d_sum1, sizeof(unsigned int) * numElems)); checkCudaErrors(cudaMemset(d_sum1, 0, sizeof(unsigned int) * numElems)); unsigned int sb = 1<<0; size_t memoryBytes = threads * sizeof(unsigned int); //for (size_t i = 0; i < 32; ++i){ while(/*sb < (1<<31)*/ sb < (1<<1) ){ apply_predicate<<<blocks, threads>>>(d_inputVals, d_predicate, numElems, sb); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); simple_scan<<<blocks, threads/2, memoryBytes>>>(d_predicate, d_scan0, d_sum1, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // TODO simple_scan works for arrays of size k * (threads/2); check what needs to be done (padding) for other sizes /// Serial Part //checkCudaErrors(cudaMemcpy(h_scan0, d_predicate, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost)); //for (int i = 0; i < 10; ++i) { // printf("predicate[%d] = %u\n", i, h_scan0[i]); //} //h_scan0[0] = 0; //for (int i = 1; i < 10; ++i) { // h_scan0[i] += h_scan0[i-1]; //if((i-1) % 1024 == 0) //printf("host scan[%d] = %u\n", i-1, h_scan0[i-1]); //} //for (int i = 0; i < 10; ++i) { // printf("host scan[%d] = %u\n", i, h_scan0[i]); //} //for (int i = 0; i < numElems; ++i) { // h_scan0[i] = 0; //} //memset(h_scan0, 0, sizeof(unsigned int) * numElems); //checkCudaErrors(cudaMemcpy(h_scan0, d_scan0, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost)); //for (int i = 0; i < 10; ++i) { //if(i % 1024 == 0) // printf("Device scan[%d] = %u\n",i, h_scan0[i]); //} //printf("Host sum = %u\n",h_sum[0]); /// End of Serial part /* size_t elems = numElems; //size_t bytes = threads * sizeof(unsigned int); size_t numBlocks = (elems + 2*threads-1)/(2*threads); reduce_sum<<<numBlocks, threads, memoryBytes>>>(d_predicate, d_sum1, elems); 
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); while(numBlocks > 1){ elems = numBlocks; numBlocks = (elems + 2*threads-1)/(2*threads); reduce_sum<<<numBlocks, threads, memoryBytes>>>(d_sum1, d_sum1, elems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } // @d_sum1[0] should be the final sum // Serial Part checkCudaErrors(cudaMemcpy(h_sum, d_sum1, sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("Device sum = %u\n", h_sum[0]); // End of serial part flip_array<<<blocks, threads>>>(d_predicate, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); simple_scan<<<blocks, threads, memoryBytes>>>(d_predicate, d_scan1, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); change_positions<<<blocks, threads>>>(d_inputVals, d_inputPos, d_outputVals, d_outputPos, d_scan0, d_scan1, d_predicate, numElems, d_sum1); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); */ std::swap(d_inputPos, d_outputPos); std::swap(d_inputVals, d_outputVals); sb = sb << 1; std::cout << "end of iteration no " << sb <<'\n'; } //std::swap(d_inputPos, d_outputPos); // std::swap(d_inputVals, d_outputVals); std::cout << "its over\n"; }
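// ---------------------------------------------------------------------------
// Illustrative aside, not part of the original file: the final scatter address
// for one LSB radix pass, once both exclusive scans and the zero count are
// available. Zeros keep their scan rank; ones land after all the zeros. (This
// is the role the disabled `offset = sum[0]` in change_positions is meant to
// play.)
__host__ __device__ inline unsigned int radix_dest(bool is_zero, unsigned int scan0_i,
                                                   unsigned int scan1_i,
                                                   unsigned int num_zeros)
{
    return is_zero ? scan0_i : num_zeros + scan1_i;
}
// Worked example for the docstring's bits [0 0 1 1 0 0 1]: the exclusive scan
// of the zero-predicate is [0 1 2 2 2 3 4], the exclusive scan of the
// one-predicate is [0 0 0 1 2 2 2], num_zeros = 4, so the destinations are
// [0 1 4 5 2 3 6].
// ---------------------------------------------------------------------------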
4256b378ff5dc802bba4ee802cb9568869f1c41b.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _TRACKFIT_KERNEL_H_ #define _TRACKFIT_KERNEL_H_ #include <stdio.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> inline __device__ int Newton(double Mxx, double Myy, double Mxy, double Mxz, double Myz, double Mzz, double *result) { double Mz, Mxz2, Myz2, Cov_xy; //,temp; double A0, A1, A2, A22, epsilon = 0.000000000001; double Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters double GAM, DET; // double Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[0] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[0] * result[0] + result[1] * result[1] - GAM) < 0.) return 0; result[2] = sqrt(result[0] * result[0] + result[1] * result[1] - GAM); return 1; } __global__ void Fit(double *X, double *Y, double *Z1, double *Z1err, double *Mx, double *My, double *M0, double *result) { unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Negative root: x=%f\n"); __shared__ double Xs[30]; __shared__ double Ys[30]; __shared__ double Xis[30]; __shared__ double Yis[30]; __shared__ double Zis[30]; __shared__ double Z1s[30]; __shared__ double Z1Errs[30]; __shared__ double rho[30]; __shared__ double fZWeight[30]; if (x < 30){ Xs[x] = X[x]; Ys[x] = Y[x]; Z1s[x] = Z1[x]; Z1Errs[x] = Z1err[x]; // printf("Z1Errs(%d) = %g \n",x , Z1Errs[x]); } Xis[x] = Xs[x] - Mx[0]; Yis[x] = Ys[x] - My[0]; __syncthreads(); Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Xis[x] * Xis[x] + Yis[x] * Yis[x]); // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } __syncthreads(); if (x<1) { double Mxx = 0; double Myy = 0; double Mxy = 0; double Mxz = 0; double Myz = 0; double Mzz = 0; double wsum = 0.; double wx = 0.; double wy = 0.; double wxx = 0.; double wxy = 0.; for (int i = 0; i<30; i++){ Mxy += Xis[i] * Yis[i]; Mxx += Xis[i] * Xis[i]; Myy += Yis[i] * Yis[i]; Mxz += Xis[i] * Zis[i]; Myz += Yis[i] * Zis[i]; Mzz += Zis[i] * Zis[i]; wsum += fZWeight[i]; wx += fZWeight[i] * rho[i]; wy += fZWeight[i] * Z1s[i]; wxx += fZWeight[i] * rho[i] * rho[i]; wxy += fZWeight[i] * rho[i] * Z1s[i]; // printf("Weights =%f %f %f \n" ,fZWeight[i],rho[i],Z1s[i] ); } Mxx /= M0[0]; Myy /= M0[0]; Mxy /= M0[0]; Mxz /= M0[0]; Myz /= M0[0]; Mzz /= M0[0]; // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = Newton(Mxx, 
Myy, Mxy, Mxz, Myz, Mzz, result); double mm = 0.; double qq = 0.; double det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3] = -mm; result[4] = qq; double r1; double chi2 = 0.; for (int i = 0; i < 30; i++) { r1 = Z1s[i] + result[3] * rho[i] - result[4]; chi2 += fZWeight[i] * (r1 * r1); } result[5] = chi2; if (det>0.00001){ double varsq = sqrt(chi2 / 30.); result[6] = varsq * sqrt(wsum / det); result[7] = varsq * sqrt(wxx / det); } else{ result[6] = 0; result[7] = 0; } } } /* inline __device__ int NewtonF(float Mxx, float Myy, float Mxy, float Mxz, float Myz, float Mzz, float *result) { float Mz, Mxz2, Myz2, Cov_xy; //,temp; float A0, A1, A2, A22, epsilon = 0.000000000001; float Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters float GAM, DET; // float Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[0] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[0] * result[0] + result[1] * result[1] - GAM) < 0.) 
return 0; result[2] = sqrt(result[0] * result[0] + result[1] * result[1] - GAM); return 1; } __global__ void FitF(float *X, float *Y, float *Z1, float *Z1err, float *Mx1, float *My1, float *M01, float *result) { unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Negative root: x=%f\n"); __shared__ float Xs[15]; __shared__ float Ys[15]; __shared__ float Xis[15]; __shared__ float Yis[15]; __shared__ float Zis[15]; __shared__ float Z1s[15]; __shared__ float Z1Errs[15]; __shared__ float rho[15]; __shared__ float fZWeight[15]; __shared__ float Mx[1]; __shared__ float My[1]; __shared__ float M0[1]; // if(x < 30){ Xs[x] = X[x]; Ys[x] = Y[x]; Z1s[x] = Z1[x]; Z1Errs[x] = Z1err[x]; // printf("Z1Errs(%d) = %g \n",x , Z1Errs[x]); // } if (x<1){ Mx[0] = Mx1[0]; My[0] = My1[0]; M0[0] = M01[0]; } __syncthreads(); Xis[x] = Xs[x] - Mx[0]; Yis[x] = Ys[x] - My[0]; Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Xis[x] * Xis[x] + Yis[x] * Yis[x]); // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } __syncthreads(); if (x<1) { float Mxx = 0; float Myy = 0; float Mxy = 0; float Mxz = 0; float Myz = 0; float Mzz = 0; float wsum = 0.; float wx = 0.; float wy = 0.; float wxx = 0.; float wxy = 0.; for (int i = 0; i<15; i++){ Mxy += Xis[i] * Yis[i]; Mxx += Xis[i] * Xis[i]; Myy += Yis[i] * Yis[i]; Mxz += Xis[i] * Zis[i]; Myz += Yis[i] * Zis[i]; Mzz += Zis[i] * Zis[i]; wsum += fZWeight[i]; wx += fZWeight[i] * rho[i]; wy += fZWeight[i] * Z1s[i]; wxx += fZWeight[i] * rho[i] * rho[i]; wxy += fZWeight[i] * rho[i] * Z1s[i]; // printf("Weights =%f %f %f \n" ,fZWeight[i],rho[i],Z1s[i] ); } Mxx /= M0[0]; Myy /= M0[0]; Mxy /= M0[0]; Mxz /= M0[0]; Myz /= M0[0]; Mzz /= M0[0]; // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = NewtonF(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); float mm = 0.; float qq = 0.; float det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3] = -mm; result[4] = qq; float r1; float chi2 = 0.; for (int i = 0; i < 15; i++) { r1 = Z1s[i] + result[3] * rho[i] - result[4]; chi2 += fZWeight[i] * (r1 * r1); } result[5] = chi2; if (det>0.00001){ float varsq = sqrt(chi2 / 13.); result[6] = varsq * sqrt(wsum / det); result[7] = varsq * sqrt(wxx / det); } else{ result[6] = 0; result[7] = 0; } } } inline __device__ int NewtonAllF(float Mxx, float Myy, float Mxy, float Mxz, float Myz, float Mzz, float *result) { unsigned int y = blockIdx.x; float Mz, Mxz2, Myz2, Cov_xy; //,temp; float A0, A1, A2, A22, epsilon = 0.000000000001; float Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 
1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters float GAM, DET; // float Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[8 * y] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1 + 8 * y] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM) < 0.) return 0; result[2 + 8 * y] = sqrt(result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM); return 1; } __global__ void FitAllF(float *X, float *Y, float *Z1, float *Z1err, float *Mx1, float *My1, float *M01, float *result) { // unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y = blockIdx.x; unsigned int x = threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Block No: =%d , threadId.x =%d \n",y,x); __shared__ float Xs[30]; __shared__ float Ys[30]; __shared__ float Xis[30]; __shared__ float Yis[30]; __shared__ float Zis[30]; __shared__ float Z1s[30]; __shared__ float Z1Errs[30]; __shared__ float rho[30]; __shared__ float fZWeight[30]; __shared__ float Mx[100]; __shared__ float My[100]; __shared__ float M0[100]; float Mxx = 0.; float Myy = 0.; float Mxy = 0.; float Mxz = 0.; float Myz = 0.; float Mzz = 0.; float wsum = 0.; float wx = 0.; float wy = 0.; float wxx = 0.; float wxy = 0.; Xs[x] = X[x + 30 * y]; Ys[x] = Y[x + 30 * y]; Z1s[x] = Z1[x + 30 * y]; Z1Errs[x] = Z1err[x + 30 * y]; // printf("blockid = %d Z1Errs(%d) = %g \n",y, x , Z1Errs[x]); // if(x < 8) result[x+8*y]=0.000; if (x<1){ Mx[y] = Mx1[y]; My[y] = My1[y]; M0[y] = M01[y]; } __syncthreads(); Xis[x] = Xs[x] - Mx[y]; Yis[x] = Ys[x] - My[y]; Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Zis[x]); Mxy += Xis[x] * Yis[x] / M0[y]; Mxx += Xis[x] * Xis[x] / M0[y]; Myy += Yis[x] * Yis[x] / M0[y]; Mxz += Xis[x] * Zis[x] / M0[y]; Myz += Yis[x] * Zis[x] / M0[y]; Mzz += Zis[x] * Zis[x] / M0[y]; // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } wsum += fZWeight[x]; wx += fZWeight[x] * rho[x]; wy += fZWeight[x] * Z1s[x]; wxx += fZWeight[x] * Z1s[x]; wxy += fZWeight[x] * rho[x] * Z1s[x]; __syncthreads(); if (x<1) { // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = NewtonAllF(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); float mm = 0.; float qq = 0.; float det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3 + 8 * y] = -mm; result[4 + 8 * y] = qq; float r1; float chi2 = 0.; for (int i = 0; i < 30; i++) { r1 = Z1s[i] + result[3 + 8 * y] * rho[i] - result[4 + 8 * y]; chi2 += fZWeight[i] * (r1 * r1); } result[5 + 8 * y] = chi2; if (det>0.00001){ float varsq = sqrt(chi2 / 25.); result[6 + 8 * y] = varsq * sqrt(wsum / det); result[7 + 8 * y] = varsq * sqrt(wxx / det); } else{ result[6 + 8 * y] = 0; result[7 + 8 * y] = 0; } } } inline __device__ int NewtonAllD(double Mxx, double Myy, double Mxy, double Mxz, double Myz, double Mzz, double *result) { unsigned int y = blockIdx.x; double Mz, Mxz2, Myz2, Cov_xy; //,temp; double A0, A1, A2, A22, epsilon = 0.000000000001; double Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 
4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters double GAM, DET; // double Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[8 * y] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1 + 8 * y] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM) < 0.) return 0; result[2 + 8 * y] = sqrt(result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM); return 1; } __global__ void FitAllD(double *X, double *Y, double *Z1, double *Z1err, double *Mx, double *My, double *M0, double *result) { // unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y = blockIdx.x; unsigned int x = threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Block No: =%d , threadId.x =%d \n",y,x); __shared__ double Xs[30]; __shared__ double Ys[30]; __shared__ double Xis[30]; __shared__ double Yis[30]; __shared__ double Zis[30]; __shared__ double Z1s[30]; __shared__ double Z1Errs[30]; __shared__ double rho[30]; __shared__ double fZWeight[30]; double Mxx = 0.; double Myy = 0.; double Mxy = 0.; double Mxz = 0.; double Myz = 0.; double Mzz = 0.; double wsum = 0.; double wx = 0.; double wy = 0.; double wxx = 0.; double wxy = 0.; Xs[x] = X[x + 30 * y]; Ys[x] = Y[x + 30 * y]; Z1s[x] = Z1[x + 30 * y]; Z1Errs[x] = Z1err[x + 30 * y]; // printf("blockid = %d Z1Errs(%d) = %g \n",y, x , Z1Errs[x]); // if(x < 8) result[x+8*y]=0.000; __syncthreads(); Xis[x] = Xs[x] - Mx[y]; Yis[x] = Ys[x] - My[y]; Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Zis[x]); Mxy += Xis[x] * Yis[x] / M0[y]; Mxx += Xis[x] * Xis[x] / M0[y]; Myy += Yis[x] * Yis[x] / M0[y]; Mxz += Xis[x] * Zis[x] / M0[y]; Myz += Yis[x] * Zis[x] / M0[y]; Mzz += Zis[x] * Zis[x] / M0[y]; // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } wsum += fZWeight[x]; wx += fZWeight[x] * rho[x]; wy += fZWeight[x] * Z1s[x]; wxx += fZWeight[x] * Z1s[x]; wxy += fZWeight[x] * rho[x] * Z1s[x]; __syncthreads(); if (x<1) { // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = NewtonAllD(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); double mm = 0.; double qq = 0.; double det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3 + 8 * y] = -mm; result[4 + 8 * y] = qq; double r1; double chi2 = 0.; for (int i = 0; i < 30; i++) { r1 = Z1s[i] + result[3 + 8 * y] * rho[i] - result[4 + 8 * y]; chi2 += fZWeight[i] * (r1 * r1); } result[5 + 8 * y] = chi2; if (det>0.00001){ double varsq = 
sqrt(chi2 / 25.);
      result[6 + 8 * y] = varsq * sqrt(wsum / det);
      result[7 + 8 * y] = varsq * sqrt(wxx / det);
    } else {
      result[6 + 8 * y] = 0;
      result[7 + 8 * y] = 0;
    }
  }
}
*/

#endif // #ifndef _TRACKFIT_KERNEL_H_
4256b378ff5dc802bba4ee802cb9568869f1c41b.cu
#ifndef _TRACKFIT_KERNEL_H_ #define _TRACKFIT_KERNEL_H_ #include <stdio.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> inline __device__ int Newton(double Mxx, double Myy, double Mxy, double Mxz, double Myz, double Mzz, double *result) { double Mz, Mxz2, Myz2, Cov_xy; //,temp; double A0, A1, A2, A22, epsilon = 0.000000000001; double Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters double GAM, DET; // double Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[0] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[0] * result[0] + result[1] * result[1] - GAM) < 0.) return 0; result[2] = sqrt(result[0] * result[0] + result[1] * result[1] - GAM); return 1; } __global__ void Fit(double *X, double *Y, double *Z1, double *Z1err, double *Mx, double *My, double *M0, double *result) { unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Negative root: x=%f\n"); __shared__ double Xs[30]; __shared__ double Ys[30]; __shared__ double Xis[30]; __shared__ double Yis[30]; __shared__ double Zis[30]; __shared__ double Z1s[30]; __shared__ double Z1Errs[30]; __shared__ double rho[30]; __shared__ double fZWeight[30]; if (x < 30){ Xs[x] = X[x]; Ys[x] = Y[x]; Z1s[x] = Z1[x]; Z1Errs[x] = Z1err[x]; // printf("Z1Errs(%d) = %g \n",x , Z1Errs[x]); } Xis[x] = Xs[x] - Mx[0]; Yis[x] = Ys[x] - My[0]; __syncthreads(); Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Xis[x] * Xis[x] + Yis[x] * Yis[x]); // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } __syncthreads(); if (x<1) { double Mxx = 0; double Myy = 0; double Mxy = 0; double Mxz = 0; double Myz = 0; double Mzz = 0; double wsum = 0.; double wx = 0.; double wy = 0.; double wxx = 0.; double wxy = 0.; for (int i = 0; i<30; i++){ Mxy += Xis[i] * Yis[i]; Mxx += Xis[i] * Xis[i]; Myy += Yis[i] * Yis[i]; Mxz += Xis[i] * Zis[i]; Myz += Yis[i] * Zis[i]; Mzz += Zis[i] * Zis[i]; wsum += fZWeight[i]; wx += fZWeight[i] * rho[i]; wy += fZWeight[i] * Z1s[i]; wxx += fZWeight[i] * rho[i] * rho[i]; wxy += fZWeight[i] * rho[i] * Z1s[i]; // printf("Weights =%f %f %f \n" ,fZWeight[i],rho[i],Z1s[i] ); } Mxx /= M0[0]; Myy /= M0[0]; Mxy /= M0[0]; Mxz /= M0[0]; Myz /= M0[0]; Mzz /= M0[0]; // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = Newton(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); double mm = 0.; double qq = 
0.; double det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3] = -mm; result[4] = qq; double r1; double chi2 = 0.; for (int i = 0; i < 30; i++) { r1 = Z1s[i] + result[3] * rho[i] - result[4]; chi2 += fZWeight[i] * (r1 * r1); } result[5] = chi2; if (det>0.00001){ double varsq = sqrt(chi2 / 30.); result[6] = varsq * sqrt(wsum / det); result[7] = varsq * sqrt(wxx / det); } else{ result[6] = 0; result[7] = 0; } } } /* inline __device__ int NewtonF(float Mxx, float Myy, float Mxy, float Mxz, float Myz, float Mzz, float *result) { float Mz, Mxz2, Myz2, Cov_xy; //,temp; float A0, A1, A2, A22, epsilon = 0.000000000001; float Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters float GAM, DET; // float Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[0] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[0] * result[0] + result[1] * result[1] - GAM) < 0.) 
return 0; result[2] = sqrt(result[0] * result[0] + result[1] * result[1] - GAM); return 1; } __global__ void FitF(float *X, float *Y, float *Z1, float *Z1err, float *Mx1, float *My1, float *M01, float *result) { unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Negative root: x=%f\n"); __shared__ float Xs[15]; __shared__ float Ys[15]; __shared__ float Xis[15]; __shared__ float Yis[15]; __shared__ float Zis[15]; __shared__ float Z1s[15]; __shared__ float Z1Errs[15]; __shared__ float rho[15]; __shared__ float fZWeight[15]; __shared__ float Mx[1]; __shared__ float My[1]; __shared__ float M0[1]; // if(x < 30){ Xs[x] = X[x]; Ys[x] = Y[x]; Z1s[x] = Z1[x]; Z1Errs[x] = Z1err[x]; // printf("Z1Errs(%d) = %g \n",x , Z1Errs[x]); // } if (x<1){ Mx[0] = Mx1[0]; My[0] = My1[0]; M0[0] = M01[0]; } __syncthreads(); Xis[x] = Xs[x] - Mx[0]; Yis[x] = Ys[x] - My[0]; Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Xis[x] * Xis[x] + Yis[x] * Yis[x]); // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } __syncthreads(); if (x<1) { float Mxx = 0; float Myy = 0; float Mxy = 0; float Mxz = 0; float Myz = 0; float Mzz = 0; float wsum = 0.; float wx = 0.; float wy = 0.; float wxx = 0.; float wxy = 0.; for (int i = 0; i<15; i++){ Mxy += Xis[i] * Yis[i]; Mxx += Xis[i] * Xis[i]; Myy += Yis[i] * Yis[i]; Mxz += Xis[i] * Zis[i]; Myz += Yis[i] * Zis[i]; Mzz += Zis[i] * Zis[i]; wsum += fZWeight[i]; wx += fZWeight[i] * rho[i]; wy += fZWeight[i] * Z1s[i]; wxx += fZWeight[i] * rho[i] * rho[i]; wxy += fZWeight[i] * rho[i] * Z1s[i]; // printf("Weights =%f %f %f \n" ,fZWeight[i],rho[i],Z1s[i] ); } Mxx /= M0[0]; Myy /= M0[0]; Mxy /= M0[0]; Mxz /= M0[0]; Myz /= M0[0]; Mzz /= M0[0]; // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = NewtonF(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); float mm = 0.; float qq = 0.; float det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3] = -mm; result[4] = qq; float r1; float chi2 = 0.; for (int i = 0; i < 15; i++) { r1 = Z1s[i] + result[3] * rho[i] - result[4]; chi2 += fZWeight[i] * (r1 * r1); } result[5] = chi2; if (det>0.00001){ float varsq = sqrt(chi2 / 13.); result[6] = varsq * sqrt(wsum / det); result[7] = varsq * sqrt(wxx / det); } else{ result[6] = 0; result[7] = 0; } } } inline __device__ int NewtonAllF(float Mxx, float Myy, float Mxy, float Mxz, float Myz, float Mzz, float *result) { unsigned int y = blockIdx.x; float Mz, Mxz2, Myz2, Cov_xy; //,temp; float A0, A1, A2, A22, epsilon = 0.000000000001; float Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 
1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters float GAM, DET; // float Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[8 * y] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1 + 8 * y] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM) < 0.) return 0; result[2 + 8 * y] = sqrt(result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM); return 1; } __global__ void FitAllF(float *X, float *Y, float *Z1, float *Z1err, float *Mx1, float *My1, float *M01, float *result) { // unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y = blockIdx.x; unsigned int x = threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Block No: =%d , threadId.x =%d \n",y,x); __shared__ float Xs[30]; __shared__ float Ys[30]; __shared__ float Xis[30]; __shared__ float Yis[30]; __shared__ float Zis[30]; __shared__ float Z1s[30]; __shared__ float Z1Errs[30]; __shared__ float rho[30]; __shared__ float fZWeight[30]; __shared__ float Mx[100]; __shared__ float My[100]; __shared__ float M0[100]; float Mxx = 0.; float Myy = 0.; float Mxy = 0.; float Mxz = 0.; float Myz = 0.; float Mzz = 0.; float wsum = 0.; float wx = 0.; float wy = 0.; float wxx = 0.; float wxy = 0.; Xs[x] = X[x + 30 * y]; Ys[x] = Y[x + 30 * y]; Z1s[x] = Z1[x + 30 * y]; Z1Errs[x] = Z1err[x + 30 * y]; // printf("blockid = %d Z1Errs(%d) = %g \n",y, x , Z1Errs[x]); // if(x < 8) result[x+8*y]=0.000; if (x<1){ Mx[y] = Mx1[y]; My[y] = My1[y]; M0[y] = M01[y]; } __syncthreads(); Xis[x] = Xs[x] - Mx[y]; Yis[x] = Ys[x] - My[y]; Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Zis[x]); Mxy += Xis[x] * Yis[x] / M0[y]; Mxx += Xis[x] * Xis[x] / M0[y]; Myy += Yis[x] * Yis[x] / M0[y]; Mxz += Xis[x] * Zis[x] / M0[y]; Myz += Yis[x] * Zis[x] / M0[y]; Mzz += Zis[x] * Zis[x] / M0[y]; // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } wsum += fZWeight[x]; wx += fZWeight[x] * rho[x]; wy += fZWeight[x] * Z1s[x]; wxx += fZWeight[x] * Z1s[x]; wxy += fZWeight[x] * rho[x] * Z1s[x]; __syncthreads(); if (x<1) { // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = NewtonAllF(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); float mm = 0.; float qq = 0.; float det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3 + 8 * y] = -mm; result[4 + 8 * y] = qq; float r1; float chi2 = 0.; for (int i = 0; i < 30; i++) { r1 = Z1s[i] + result[3 + 8 * y] * rho[i] - result[4 + 8 * y]; chi2 += fZWeight[i] * (r1 * r1); } result[5 + 8 * y] = chi2; if (det>0.00001){ float varsq = sqrt(chi2 / 25.); result[6 + 8 * y] = varsq * sqrt(wsum / det); result[7 + 8 * y] = varsq * sqrt(wxx / det); } else{ result[6 + 8 * y] = 0; result[7 + 8 * y] = 0; } } } inline __device__ int NewtonAllD(double Mxx, double Myy, double Mxy, double Mxz, double Myz, double Mzz, double *result) { unsigned int y = blockIdx.x; double Mz, Mxz2, Myz2, Cov_xy; //,temp; double A0, A1, A2, A22, epsilon = 0.000000000001; double Dy, xnew, xold, ynew, yold = 100000000000.; Mz = Mxx + Myy; Cov_xy = Mxx*Myy - Mxy*Mxy; Mxz2 = Mxz*Mxz; Myz2 = Myz*Myz; A2 = 
4.*Cov_xy - 3.*Mz*Mz - Mzz; A1 = Mzz*Mz + 4.*Cov_xy*Mz - Mxz2 - Myz2 - Mz*Mz*Mz; A0 = Mxz2*Myy + Myz2*Mxx - Mzz*Cov_xy - 2.*Mxz*Myz*Mxy + Mz*Mz*Cov_xy; A22 = A2 + A2; // iter = 0; xnew = 0.; // Newton's method starting at x=0 int iter, iterMax = 20; for (iter = 0; iter < iterMax; iter++) { ynew = A0 + xnew*(A1 + xnew*(A2 + 4.*xnew*xnew)); if (fabs(ynew)>fabs(yold)) { // printf("Newton2 goes wrong direction: ynew=%f // yold=%f\n",ynew,yold); xnew = 0.; break; } Dy = A1 + xnew*(A22 + 16.*xnew*xnew); xold = xnew; xnew = xold - ynew / Dy; if (fabs(xnew) < epsilon) break; if (fabs((xnew - xold) / xnew) < epsilon) break; } if (iter == iterMax - 1) { // printf("Newton2 does not converge in %d // iterations\n",iterMax); xnew = 0.; } if (xnew < 0.) { iter = 30; // printf("Negative root: x=%f\n",xnew); } // computing the circle parameters double GAM, DET; // double Xcenter,Ycenter,Radius; GAM = -Mz - xnew - xnew; DET = xnew*xnew - xnew*Mz + Cov_xy; if (DET == 0) return 0; result[8 * y] = (Mxz*(Myy - xnew) - Myz*Mxy) / DET / 2.; result[1 + 8 * y] = (Myz*(Mxx - xnew) - Mxz*Mxy) / DET / 2.; if ((result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM) < 0.) return 0; result[2 + 8 * y] = sqrt(result[8 * y] * result[8 * y] + result[1 + 8 * y] * result[1 + 8 * y] - GAM); return 1; } __global__ void FitAllD(double *X, double *Y, double *Z1, double *Z1err, double *Mx, double *My, double *M0, double *result) { // unsigned int x = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y = blockIdx.x; unsigned int x = threadIdx.x; // unsigned int y = blockDim.y * blockIdx.y + threadIdx.y; // printf("Block No: =%d , threadId.x =%d \n",y,x); __shared__ double Xs[30]; __shared__ double Ys[30]; __shared__ double Xis[30]; __shared__ double Yis[30]; __shared__ double Zis[30]; __shared__ double Z1s[30]; __shared__ double Z1Errs[30]; __shared__ double rho[30]; __shared__ double fZWeight[30]; double Mxx = 0.; double Myy = 0.; double Mxy = 0.; double Mxz = 0.; double Myz = 0.; double Mzz = 0.; double wsum = 0.; double wx = 0.; double wy = 0.; double wxx = 0.; double wxy = 0.; Xs[x] = X[x + 30 * y]; Ys[x] = Y[x + 30 * y]; Z1s[x] = Z1[x + 30 * y]; Z1Errs[x] = Z1err[x + 30 * y]; // printf("blockid = %d Z1Errs(%d) = %g \n",y, x , Z1Errs[x]); // if(x < 8) result[x+8*y]=0.000; __syncthreads(); Xis[x] = Xs[x] - Mx[y]; Yis[x] = Ys[x] - My[y]; Zis[x] = Xis[x] * Xis[x] + Yis[x] * Yis[x]; rho[x] = sqrt(Zis[x]); Mxy += Xis[x] * Yis[x] / M0[y]; Mxx += Xis[x] * Xis[x] / M0[y]; Myy += Yis[x] * Yis[x] / M0[y]; Mxz += Xis[x] * Zis[x] / M0[y]; Myz += Yis[x] * Zis[x] / M0[y]; Mzz += Zis[x] * Zis[x] / M0[y]; // printf("rho(%d) = %g \n",x , rho[x]); if (Z1Errs[x]>0.001)fZWeight[x] = 1 / (Z1Errs[x] * Z1Errs[x]); else { // printf("Z1Err(%d) = %g \n",x , Z1Errs[x]); fZWeight[x] = 0.0; } wsum += fZWeight[x]; wx += fZWeight[x] * rho[x]; wy += fZWeight[x] * Z1s[x]; wxx += fZWeight[x] * Z1s[x]; wxy += fZWeight[x] * rho[x] * Z1s[x]; __syncthreads(); if (x<1) { // printf("Result =%f %f %f %f %f \n" ,wsum, wxy, wxx, wx, wy ); int t = NewtonAllD(Mxx, Myy, Mxy, Mxz, Myz, Mzz, result); double mm = 0.; double qq = 0.; double det = wsum * wxx - wx * wx; if (det>0.00001) { mm = (wxy * wsum - wy * wx) / det; qq = (wy * wxx - wxy * wx) / det; } else { mm = 1000.; qq = 1000.; } result[3 + 8 * y] = -mm; result[4 + 8 * y] = qq; double r1; double chi2 = 0.; for (int i = 0; i < 30; i++) { r1 = Z1s[i] + result[3 + 8 * y] * rho[i] - result[4 + 8 * y]; chi2 += fZWeight[i] * (r1 * r1); } result[5 + 8 * y] = chi2; if (det>0.00001){ double varsq = 
sqrt(chi2 / 25.);
      result[6 + 8 * y] = varsq * sqrt(wsum / det);
      result[7 + 8 * y] = varsq * sqrt(wxx / det);
    } else {
      result[6 + 8 * y] = 0;
      result[7 + 8 * y] = 0;
    }
  }
}
*/

#endif // #ifndef _TRACKFIT_KERNEL_H_
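// ---------------------------------------------------------------------------
// Hedged host-side usage sketch for the Fit kernel above (not part of the
// original file). The single block of 30 threads, the 30-hit track layout and
// the 8-double result vector are inferred from the kernel's shared arrays and
// its result[0..7] indexing; the helper name runFitExample is hypothetical.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

void runFitExample(const double hX[30], const double hY[30],
                   const double hZ[30], const double hZerr[30],
                   double mx, double my, double m0, double hResult[8]) {
    double *dX, *dY, *dZ, *dZerr, *dMx, *dMy, *dM0, *dRes;
    cudaMalloc(&dX, 30 * sizeof(double));
    cudaMalloc(&dY, 30 * sizeof(double));
    cudaMalloc(&dZ, 30 * sizeof(double));
    cudaMalloc(&dZerr, 30 * sizeof(double));
    cudaMalloc(&dMx, sizeof(double));   // Mx, My, M0 are read as one-element arrays
    cudaMalloc(&dMy, sizeof(double));
    cudaMalloc(&dM0, sizeof(double));
    cudaMalloc(&dRes, 8 * sizeof(double));

    cudaMemcpy(dX, hX, 30 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dY, hY, 30 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dZ, hZ, 30 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dZerr, hZerr, 30 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dMx, &mx, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dMy, &my, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dM0, &m0, sizeof(double), cudaMemcpyHostToDevice);

    // One block of exactly 30 threads: the kernel indexes its shared arrays
    // with threadIdx.x and reduces in thread 0, so more threads would run off
    // the ends of the 30-element shared buffers.
    Fit<<<1, 30>>>(dX, dY, dZ, dZerr, dMx, dMy, dM0, dRes);

    // result[0..2]: circle centre x, y and radius from Newton();
    // result[3..4]: slope and intercept of the weighted line fit;
    // result[5]: chi2; result[6..7]: parameter errors.
    cudaMemcpy(hResult, dRes, 8 * sizeof(double), cudaMemcpyDeviceToHost);

    cudaFree(dX);  cudaFree(dY);  cudaFree(dZ);  cudaFree(dZerr);
    cudaFree(dMx); cudaFree(dMy); cudaFree(dM0); cudaFree(dRes);
}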
0a1b9f8397cd007979100ab63fbf55a6de3d02db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "shared_array.hpp" #include <thrust/device_vector.h> #include <cstdio> #include <iostream> __global__ void single_reader_hazard(thrust::device_ptr<int> result) { shared_array<128> smem; smem[threadIdx.x] = 1; // XXX missing barrier here if(threadIdx.x == 0) { // read after write int val = smem[blockDim.x - threadIdx.x - 1]; *result = val; } } __global__ void multiple_writers_hazard(thrust::device_ptr<int> result) { shared_array<128> smem; // XXX all threads write to the same location smem[0] = threadIdx.x; smem.barrier(); if(threadIdx.x == 0) { *result = smem[0]; } } __global__ void race_free_reduction(thrust::device_ptr<int> result) { shared_array<128> smem; smem[threadIdx.x] = 1; smem.barrier(); unsigned int n = 128; while(n > 1) { unsigned int half = n / 2; if(threadIdx.x < half) { smem[threadIdx.x] = smem[threadIdx.x] + smem[n - threadIdx.x - 1]; } smem.barrier(); n = n - half; } if(threadIdx.x == 0) { *result = smem[0]; } } int main() { thrust::device_vector<int> vec(1); hipLaunchKernelGGL(( race_free_reduction), dim3(1),dim3(128), 0, 0, vec.data()); std::cout << "race_free_reduction result is " << vec[0] << std::endl; hipLaunchKernelGGL(( single_reader_hazard), dim3(1),dim3(128), 0, 0, vec.data()); std::cout << "single_reader_hazard result is " << vec[0] << std::endl; hipLaunchKernelGGL(( multiple_writers_hazard), dim3(1),dim3(128), 0, 0, vec.data()); std::cout << "multiple_writers_hazard result is " << vec[0] << std::endl; return 0; }
0a1b9f8397cd007979100ab63fbf55a6de3d02db.cu
#include "shared_array.hpp" #include <thrust/device_vector.h> #include <cstdio> #include <iostream> __global__ void single_reader_hazard(thrust::device_ptr<int> result) { shared_array<128> smem; smem[threadIdx.x] = 1; // XXX missing barrier here if(threadIdx.x == 0) { // read after write int val = smem[blockDim.x - threadIdx.x - 1]; *result = val; } } __global__ void multiple_writers_hazard(thrust::device_ptr<int> result) { shared_array<128> smem; // XXX all threads write to the same location smem[0] = threadIdx.x; smem.barrier(); if(threadIdx.x == 0) { *result = smem[0]; } } __global__ void race_free_reduction(thrust::device_ptr<int> result) { shared_array<128> smem; smem[threadIdx.x] = 1; smem.barrier(); unsigned int n = 128; while(n > 1) { unsigned int half = n / 2; if(threadIdx.x < half) { smem[threadIdx.x] = smem[threadIdx.x] + smem[n - threadIdx.x - 1]; } smem.barrier(); n = n - half; } if(threadIdx.x == 0) { *result = smem[0]; } } int main() { thrust::device_vector<int> vec(1); race_free_reduction<<<1,128>>>(vec.data()); std::cout << "race_free_reduction result is " << vec[0] << std::endl; single_reader_hazard<<<1,128>>>(vec.data()); std::cout << "single_reader_hazard result is " << vec[0] << std::endl; multiple_writers_hazard<<<1,128>>>(vec.data()); std::cout << "multiple_writers_hazard result is " << vec[0] << std::endl; return 0; }
f47b38ae1ad82a2d6bad00edb87083f22cedf305.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } bools[index] = idata[index] != 0? 1 : 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } if (bools[index] == 1) { odata[indices[index]] = idata[index]; } } __global__ void shiftRight(int n, int* idata, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } if (index == 0) { odata[index] = 0; } else { odata[index] = idata[index - 1]; } } /* Copy initial data over and pad 0's if out of scope of initial size * aka the input array has a smaller initial size than the final array, * and anything larger than index [size of input array] will be 0 in the output array */ __global__ void formatInitData(int initSize, int finalSize, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= initSize && index < finalSize) { data[index] = 0; } } } }
f47b38ae1ad82a2d6bad00edb87083f22cedf305.cu
#include "common.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { // TODO int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } bools[index] = idata[index] != 0? 1 : 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { // TODO int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } if (bools[index] == 1) { odata[indices[index]] = idata[index]; } } __global__ void shiftRight(int n, int* idata, int* odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= n) { return; } if (index == 0) { odata[index] = 0; } else { odata[index] = idata[index - 1]; } } /* Copy initial data over and pad 0's if out of scope of initial size * aka the input array has a smaller initial size than the final array, * and anything larger than index [size of input array] will be 0 in the output array */ __global__ void formatInitData(int initSize, int finalSize, int* data) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= initSize && index < finalSize) { data[index] = 0; } } } }
adcca9dd43af5a75ee8c3c8bd6fd8d6c7d9e6683.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// // x - input, y - paddings, z - output template<typename X, typename Y> __global__ static void padCuda(const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void *vPadVal) { const X padVal = *reinterpret_cast<const X*>(vPadVal); const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank, rankMinusOne; __shared__ Nd4jLong zLen, totalThreads, *coords, *xShape, *zShape, shift1, shift2, yStride0; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)); zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)); yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0]; rank = shape::rank(xShapeInfo); zLen = shape::length(zShapeInfo); rankMinusOne = rank - 1; totalThreads = gridDim.x * blockDim.x; shift1 = mode == 1 ? 0 : 1; // REFLECT : SYMMETRIC shift2 = mode == 1 ? 
2 : 1; // REFLECT : SYMMETRIC } __syncthreads(); auto xzCoord = coords + threadIdx.x * rank; // we use xzCoord storage both for x and z arrays const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(mode == 0) { // CONSTANT case for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, xzCoord); const auto zOffset = shape::getOffset(zShapeInfo, xzCoord); bool within = true; for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo)]; if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;} else {xzCoord[j] = xzCoord[j] - left;} } if(within) z[zOffset] = x[shape::getOffset(xShapeInfo, xzCoord)]; else z[zOffset] = padVal; } } else { // REFLECT and SYMMETRIC cases for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, xzCoord); const auto zOffset = shape::getOffset(zShapeInfo, xzCoord); for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo)]; // are ready to fill middle (within input dimension range) if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1; // means fill from left else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right } const auto xOffset = shape::getOffset(xShapeInfo, xzCoord); z[zOffset] = x[xOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* padVal) { hipLaunchKernelGGL(( padCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal); } /////////////////////////////////////////////////////////////////// void pad(sd::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) { PointersManager manager(context, "pad"); NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue}); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128; const auto xType = input.dataType(); const auto yType = paddings.dataType(); BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.specialBuffer(), input.specialShapeInfo(), paddings.specialBuffer(), paddings.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), padValue.specialBuffer()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue}); manager.synchronize(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mirrorPadLinearKernel(void const* vx, const Nd4jLong* xShape, void* vz, const Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) { __shared__ T const* x; __shared__ T* z; if (threadIdx.x == 0) { x = reinterpret_cast<T const*>(vx); z = reinterpret_cast<T*>(vz); } 
__syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for(int i = start; i < zLen; i+= step) { auto zIndex = shape::getIndexOffset(i, zShape); auto xIndex = shape::getIndexOffset(len - i, xShape); if (i < leftSide) // left side xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape); else if(i >= leftSide && i < leftSide + xLen) // middle xIndex = shape::getIndexOffset(i - leftSide, xShape); // else // right side // z[i] = x[len - i]; z[zIndex] = x[xIndex]; } } template <typename F, typename I> static __global__ void mirrorPadKernel(void const* vx, const Nd4jLong* xShape, void* vz, const Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, const Nd4jLong* paddingShape, int reflBorder) { __shared__ F const* x; __shared__ I const* pads; __shared__ F* z; __shared__ Nd4jLong zRank, rank; __shared__ Nd4jLong* xIdx; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; xIdx = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(xShape); x = reinterpret_cast<F const*>(vx);// pads = reinterpret_cast<I const*>(paddings); z = reinterpret_cast<F*>(vz); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(Nd4jLong i = start; i < outLen; i+= step) { auto xzCoord = xIdx + threadIdx.x * rank; //auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank; shape::index2coords(i, zShape, xzCoord); auto outOffset = shape::getOffset(zShape, xzCoord); // auto intStep = blockDim.y * gridDim.y; for(int j = 0; j < rank; j++) { const Nd4jLong inLen = shape::sizeAt(xShape, j); Nd4jLong coords[2] = {j, 0}; auto padOffset = shape::getOffset(paddingShape, coords); // padding already has rank 2 const auto leftSide = pads[padOffset]; const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder; if(xzCoord[j] < leftSide) // left side xzCoord[j] = leftSideCorrected - xzCoord[j]; else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle xzCoord[j] = xzCoord[j] - leftSide; else if (len > xzCoord[j]) // right side xzCoord[j] = len - xzCoord[j]; else xzCoord[j] = xzCoord[j] - len; } auto inOffset = shape::getOffset(xShape, xzCoord); z[outOffset] = x[inOffset]; } } template<typename F, typename I> static void mirrorPad_(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { // mode: 0 - REFLECT, else - SYMMETRIC const int reflBorder = (bool)mode ? 1 : 0; const int rank = input.rankOf(); const Nd4jLong outLen = output.lengthOf(); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input, &paddings}); if(rank <= 1) { const Nd4jLong inLen = input.lengthOf(); const auto leftSide = paddings.e<Nd4jLong>(0); const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder; hipLaunchKernelGGL(( mirrorPadLinearKernel<F>), dim3(256), dim3(512), 256, *stream, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen); sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) 
failed"); } else { const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (outLen + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * input.rankOf() + 256; hipLaunchKernelGGL(( mirrorPadKernel<F, I>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.specialBuffer(), paddings.specialShapeInfo(), reflBorder); sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed"); } NDArray::registerSpecialUse({&output}, {&input, &paddings}); } void mirrorPad(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INDEXING_TYPES); } } } }
adcca9dd43af5a75ee8c3c8bd6fd8d6c7d9e6683.cu
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//

#include <ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>

namespace sd {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
// x - input, y - paddings, z - output
template<typename X, typename Y>
__global__ static void padCuda(const int mode,
                               const void *vx, const Nd4jLong *xShapeInfo,
                               const void *vy, const Nd4jLong *yShapeInfo,
                               void *vz, const Nd4jLong *zShapeInfo,
                               const void *vPadVal) {

    const X padVal = *reinterpret_cast<const X*>(vPadVal);

    const auto x = reinterpret_cast<const X*>(vx);
    const auto y = reinterpret_cast<const Y*>(vy);
    auto z = reinterpret_cast<X*>(vz);

    __shared__ int rank, rankMinusOne;
    __shared__ Nd4jLong zLen, totalThreads, *coords, *xShape, *zShape, shift1, shift2, yStride0;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        coords = reinterpret_cast<Nd4jLong*>(shmem);
        zLen = shape::length(zShapeInfo);
        xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo));
        zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo));
        yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0];
        rank = shape::rank(xShapeInfo);
        zLen = shape::length(zShapeInfo);
        rankMinusOne = rank - 1;
        totalThreads = gridDim.x * blockDim.x;
        shift1 = mode == 1 ? 0 : 1;   // REFLECT : SYMMETRIC
        shift2 = mode == 1 ? 2 : 1;   // REFLECT : SYMMETRIC
    }
    __syncthreads();

    auto xzCoord = coords + threadIdx.x * rank;   // we use xzCoord storage both for x and z arrays

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (mode == 0) { // CONSTANT case

        for (Nd4jLong i = tid; i < zLen; i += totalThreads) {

            shape::index2coords(i, zShapeInfo, xzCoord);
            const auto zOffset = shape::getOffset(zShapeInfo, xzCoord);

            bool within = true;
            for (int j = rankMinusOne; j >= 0; --j) {
                if (xShape[j] == zShape[j]) continue;
                const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo)];
                if (xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) { within = false; break; }
                else { xzCoord[j] = xzCoord[j] - left; }
            }

            if (within)
                z[zOffset] = x[shape::getOffset(xShapeInfo, xzCoord)];
            else
                z[zOffset] = padVal;
        }
    }
    else { // REFLECT and SYMMETRIC cases

        for (Nd4jLong i = tid; i < zLen; i += totalThreads) {

            shape::index2coords(i, zShapeInfo, xzCoord);
            const auto zOffset = shape::getOffset(zShapeInfo, xzCoord);

            for (int j = rankMinusOne; j >= 0; --j) {

                if (xShape[j] == zShape[j]) continue;
                xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo)];       // are ready to fill middle (within input dimension range)
                if (xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1;                              // means fill from left
                else if (xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right
            }

            const auto xOffset = shape::getOffset(xShapeInfo, xzCoord);
            z[zOffset] = x[xOffset];
        }
    }
}

///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
                            const cudaStream_t *stream, const int mode,
                            const void *vx, const Nd4jLong *xShapeInfo,
                            const void *vy, const Nd4jLong *yShapeInfo,
                            void *vz, const Nd4jLong *zShapeInfo,
                            const void* padVal) {

    padCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal);
}

///////////////////////////////////////////////////////////////////
void pad(sd::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) {

    PointersManager manager(context, "pad");

    NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue});

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128;

    const auto xType = input.dataType();
    const auto yType = paddings.dataType();

    BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.specialBuffer(), input.specialShapeInfo(), paddings.specialBuffer(), paddings.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), padValue.specialBuffer()), LIBND4J_TYPES, INDEXING_TYPES);

    NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue});
    manager.synchronize();
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void mirrorPadLinearKernel(void const* vx, const Nd4jLong* xShape,
                                             void* vz, const Nd4jLong* zShape,
                                             Nd4jLong leftSide, Nd4jLong leftSideCorrected,
                                             Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) {

    __shared__ T const* x;
    __shared__ T* z;
    if (threadIdx.x == 0) {
        x = reinterpret_cast<T const*>(vx);
        z = reinterpret_cast<T*>(vz);
    }
    __syncthreads();

    auto start = blockIdx.x * blockDim.x + threadIdx.x;
    auto step = blockDim.x * gridDim.x;

    for (int i = start; i < zLen; i += step) {
        auto zIndex = shape::getIndexOffset(i, zShape);
        auto xIndex = shape::getIndexOffset(len - i, xShape);

        if (i < leftSide)                               // left side
            xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape);
        else if (i >= leftSide && i < leftSide + xLen)  // middle
            xIndex = shape::getIndexOffset(i - leftSide, xShape);
        // else                                         // right side
        //     z[i] = x[len - i];

        z[zIndex] = x[xIndex];
    }
}

template <typename F, typename I>
static __global__ void mirrorPadKernel(void const* vx, const Nd4jLong* xShape,
                                       void* vz, const Nd4jLong* zShape,
                                       Nd4jLong outLen,
                                       void const* paddings, const Nd4jLong* paddingShape,
                                       int reflBorder) {

    __shared__ F const* x;
    __shared__ I const* pads;
    __shared__ F* z;
    __shared__ Nd4jLong zRank, rank;
    __shared__ Nd4jLong* xIdx;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        xIdx = reinterpret_cast<Nd4jLong*>(shmem);
        rank = shape::rank(xShape);

        x = reinterpret_cast<F const*>(vx);
        pads = reinterpret_cast<I const*>(paddings);
        z = reinterpret_cast<F*>(vz);
    }
    __syncthreads();

    auto start = threadIdx.x + blockIdx.x * blockDim.x;
    auto step = blockDim.x * gridDim.x;

    for (Nd4jLong i = start; i < outLen; i += step) {

        auto xzCoord = xIdx + threadIdx.x * rank;
        //auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank;

        shape::index2coords(i, zShape, xzCoord);
        auto outOffset = shape::getOffset(zShape, xzCoord);
        // auto intStep = blockDim.y * gridDim.y;
        for (int j = 0; j < rank; j++) {

            const Nd4jLong inLen = shape::sizeAt(xShape, j);
            Nd4jLong coords[2] = {j, 0};
            auto padOffset = shape::getOffset(paddingShape, coords);   // padding already has rank 2
            const auto leftSide = pads[padOffset];
            const auto leftSideCorrected = leftSide - reflBorder;
            const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;

            if (xzCoord[j] < leftSide)                                          // left side
                xzCoord[j] = leftSideCorrected - xzCoord[j];
            else if (xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen)   // middle
                xzCoord[j] = xzCoord[j] - leftSide;
            else if (len > xzCoord[j])                                          // right side
                xzCoord[j] = len - xzCoord[j];
            else
                xzCoord[j] = xzCoord[j] - len;
        }

        auto inOffset = shape::getOffset(xShape, xzCoord);
        z[outOffset] = x[inOffset];
    }
}

template<typename F, typename I>
static void mirrorPad_(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {

    // mode: 0 - REFLECT, else - SYMMETRIC
    const int reflBorder = (bool)mode ? 1 : 0;
    const int rank = input.rankOf();
    const Nd4jLong outLen = output.lengthOf();
    auto stream = context->getCudaStream();

    NDArray::prepareSpecialUse({&output}, {&input, &paddings});

    if (rank <= 1) {

        const Nd4jLong inLen = input.lengthOf();
        const auto leftSide = paddings.e<Nd4jLong>(0);
        const auto leftSideCorrected = leftSide - reflBorder;
        const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder;

        mirrorPadLinearKernel<F><<<256, 512, 256, *stream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen);
        sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed");
    }
    else {
        const int threadsPerBlock = MAX_NUM_THREADS / 2;
        const int blocksPerGrid = (outLen + threadsPerBlock - 1) / threadsPerBlock;
        const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * input.rankOf() + 256;

        mirrorPadKernel<F, I><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.specialBuffer(), paddings.specialShapeInfo(), reflBorder);
        sd::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed");
    }

    NDArray::registerSpecialUse({&output}, {&input, &paddings});
}

void mirrorPad(sd::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
    BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INDEXING_TYPES);
}

}
}
}