hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
a7f69c9cbd26ae8f9d1880ae7810c7250427328a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#define initTimer struct timeval tv1, tv2; struct timezone tz
#define startTimer gettimeofday(&tv1, &tz)
#define stopTimer gettimeofday(&tv2, &tz)
#define tpsCalcul (tv2.tv_sec-tv1.tv_sec)*1000000L + (tv2.tv_usec-tv1.tv_usec)
#define MAX_DIM_GRID 65535
#define MAX_DIM_BLOCK 1024
long tailleVecteur ;
/* KERNEL CUDA */
__global__ void add_vec_scalaire_gpu(int *vec, int *res, int a, long N) {
long i = (long)blockIdx.x * (long)blockDim.x + (long)threadIdx.x;
if (i < N) {
res[i] = vec[i] + a;
}
}
void add_vec_scalaire_cpu(int *vec, int *res, int a, long N)
{
int i ;
for (i=0 ; i < N ; i ++) {
res[i] = vec[i] + a;
}
}
int main(int argc, char *argv[]) {
int alpha = 10;
if (argc < 2) {
printf("Erreur, manque un argument\n");
exit(0);
}
tailleVecteur = atol(argv[1]);
long blocksize = 1;
if (argc ==3) {
blocksize = atoi(argv[2]);
}
int *vecteur;
int *resultat;
int *cudaVec;
int *cudaRes;
initTimer;
long size = sizeof(int)*tailleVecteur;
vecteur = (int *)malloc(size);
resultat = (int *)malloc(size);
if (vecteur == NULL) {
printf("Allocation memoire qui pose probleme (vecteur) \n");
}
if (resultat == NULL) {
printf("Allocation memoire qui pose probleme (resultat) \n");
}
long i ;
for (i= 0 ; i < tailleVecteur ; i++) {
vecteur[i] = rand() % 100;
resultat[i] = 0;
}
/* hipSetDevice(1); */
if (hipMalloc((void **)&cudaVec, size) == hipErrorMemoryAllocation) {
printf("Allocation memoire qui pose probleme (cudaVec) \n");
}
if (hipMalloc((void **)&cudaRes, size) == hipErrorMemoryAllocation) {
printf("Allocation memoire qui pose probleme (cudaRes) \n");
}
long dimBlock = blocksize;
long dimGrid = tailleVecteur/blocksize;
if ((tailleVecteur % blocksize) != 0) {
dimGrid++;
}
int res = hipMemcpy(&cudaVec[0], &vecteur[0], size, hipMemcpyHostToDevice);
printf("Copy CPU -> GPU %d \n",res);
startTimer;
hipLaunchKernelGGL(( add_vec_scalaire_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, cudaVec, cudaRes, alpha, tailleVecteur);
hipDeviceSynchronize();
stopTimer;
printf("chrono_gpu %ld \n", tpsCalcul);
hipMemcpy(&resultat[0], &cudaRes[0], size, hipMemcpyDeviceToHost);
/* Test bon fonctionnement */
bool ok = true;
int indice = -1;
for (i= 0 ; i < tailleVecteur ; i++) {
/* printf("Resultat GPU %d Resultat CPU %d \n",resultat[i], vecteur[i]+alpha); */
if (resultat[i] != vecteur[i] + alpha) {
ok = false;
if (indice ==-1) {
indice = i;
}
}
}
printf("------ ");
printf("dimGrid %ld dimBlock %ld ",dimGrid, dimBlock);
if (ok) {
printf("Resultat ok\n");
} else {
printf("resultat NON ok (%d)\n", indice);
}
printf("Vecteur %ld => Temps calcul GPU %ld \n", tailleVecteur, tpsCalcul);
startTimer;
add_vec_scalaire_cpu (vecteur, resultat, alpha, tailleVecteur);
stopTimer;
printf("chrono_cpu %ld \n", tpsCalcul);
hipFree(cudaVec);
hipFree(cudaRes);
}
| a7f69c9cbd26ae8f9d1880ae7810c7250427328a.cu | #include <stdio.h>
#include <sys/time.h>
#define initTimer struct timeval tv1, tv2; struct timezone tz
#define startTimer gettimeofday(&tv1, &tz)
#define stopTimer gettimeofday(&tv2, &tz)
#define tpsCalcul (tv2.tv_sec-tv1.tv_sec)*1000000L + (tv2.tv_usec-tv1.tv_usec)
#define MAX_DIM_GRID 65535
#define MAX_DIM_BLOCK 1024
long tailleVecteur ;
/* KERNEL CUDA */
__global__ void add_vec_scalaire_gpu(int *vec, int *res, int a, long N) {
long i = (long)blockIdx.x * (long)blockDim.x + (long)threadIdx.x;
if (i < N) {
res[i] = vec[i] + a;
}
}
void add_vec_scalaire_cpu(int *vec, int *res, int a, long N)
{
int i ;
for (i=0 ; i < N ; i ++) {
res[i] = vec[i] + a;
}
}
int main(int argc, char *argv[]) {
int alpha = 10;
if (argc < 2) {
printf("Erreur, manque un argument\n");
exit(0);
}
tailleVecteur = atol(argv[1]);
long blocksize = 1;
if (argc ==3) {
blocksize = atoi(argv[2]);
}
int *vecteur;
int *resultat;
int *cudaVec;
int *cudaRes;
initTimer;
long size = sizeof(int)*tailleVecteur;
vecteur = (int *)malloc(size);
resultat = (int *)malloc(size);
if (vecteur == NULL) {
printf("Allocation memoire qui pose probleme (vecteur) \n");
}
if (resultat == NULL) {
printf("Allocation memoire qui pose probleme (resultat) \n");
}
long i ;
for (i= 0 ; i < tailleVecteur ; i++) {
vecteur[i] = rand() % 100;
resultat[i] = 0;
}
/* cudaSetDevice(1); */
if (cudaMalloc((void **)&cudaVec, size) == cudaErrorMemoryAllocation) {
printf("Allocation memoire qui pose probleme (cudaVec) \n");
}
if (cudaMalloc((void **)&cudaRes, size) == cudaErrorMemoryAllocation) {
printf("Allocation memoire qui pose probleme (cudaRes) \n");
}
long dimBlock = blocksize;
long dimGrid = tailleVecteur/blocksize;
if ((tailleVecteur % blocksize) != 0) {
dimGrid++;
}
int res = cudaMemcpy(&cudaVec[0], &vecteur[0], size, cudaMemcpyHostToDevice);
printf("Copy CPU -> GPU %d \n",res);
startTimer;
add_vec_scalaire_gpu<<<dimGrid, dimBlock>>>(cudaVec, cudaRes, alpha, tailleVecteur);
cudaDeviceSynchronize();
stopTimer;
printf("chrono_gpu %ld \n", tpsCalcul);
cudaMemcpy(&resultat[0], &cudaRes[0], size, cudaMemcpyDeviceToHost);
/* Test bon fonctionnement */
bool ok = true;
int indice = -1;
for (i= 0 ; i < tailleVecteur ; i++) {
/* printf("Resultat GPU %d Resultat CPU %d \n",resultat[i], vecteur[i]+alpha); */
if (resultat[i] != vecteur[i] + alpha) {
ok = false;
if (indice ==-1) {
indice = i;
}
}
}
printf("------ ");
printf("dimGrid %ld dimBlock %ld ",dimGrid, dimBlock);
if (ok) {
printf("Resultat ok\n");
} else {
printf("resultat NON ok (%d)\n", indice);
}
printf("Vecteur %ld => Temps calcul GPU %ld \n", tailleVecteur, tpsCalcul);
startTimer;
add_vec_scalaire_cpu (vecteur, resultat, alpha, tailleVecteur);
stopTimer;
printf("chrono_cpu %ld \n", tpsCalcul);
cudaFree(cudaVec);
cudaFree(cudaRes);
}
|
838d640c51742cf3602e6c66229ad4f6d638e755.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
| 838d640c51742cf3602e6c66229ad4f6d638e755.cu | #include <iostream>
#include <stdlib.h>
void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
3a0c783bec760efa805837c1b16219806764f902.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <omp.h>
#define TILE_WIDTH 16
// a simple version of matrix multiplication which issues redundant loads from off-chip
// global memory
__global__ void matrixMultiplySimple(float *a, float *b, float *c, int width)
{
// Calculate the row and column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
// do dot product between row of a and column of b
for(int i=0; i<width; i++)
{
result += a[row*width+i] * b[i*width+col];
}
// write out thread's result
c[row*width+col] = result;
}
//an optimized version of matrix_multiplication which eliminates redundant loads
__global__ void matrixMultiplyOptimised(float *a, float *b, float*c, int width)
{
// create shorthand names for threadIdx & blockIdx
int tx = threadIdx.x;
int ty = threadIdx.y;
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
//calculate the row and column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
//loop over the tiles of the input in phases
for(int p=0; p<width/TILE_WIDTH; p++)
{
// collab load tiles into __shared__
s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)];
s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col];
// wait until all data loaded before allowing any thread in this block
// to continue
__syncthreads();
// do dot product between row of s_a and column of s_b
for(int i=0; i<TILE_WIDTH; i++)
{
result += s_a[ty][i] * s_b[i][tx];
}
// wait until all threads are finished before allowing any thread in this
// block to continue
__syncthreads();
}
// write out this thread's result
c[row*width + col] = result;
}
void matrixMultiplyCPU(float *a, float *b, float *c, int width)
{
float result;
for(int row=0; row<width; row++)
{
for(int col=0; col<width; col++)
{
result = 0;
for(int k=0; k<width; k++)
{
result += a[row*width + k] * b[k*width + col];
}
c[row*width + col] = result;
}
}
}
int main(void)
{
// the width of the matrix (not the number of total elements)
int N = 1024;
//grid and block size
dim3 block(TILE_WIDTH, TILE_WIDTH);
dim3 grid(N/block.x, N/block.y);
//host memory pointers
float *a_h = NULL;
float *b_h = NULL;
float *c_h = NULL;
//allocate host memory
size_t memSize = (N*N) * sizeof(float);
a_h = (float *) malloc(memSize);
b_h = (float *) malloc(memSize);
c_h = (float *) malloc(memSize);
// Generate random input
for(int i=0; i < N*N; ++i)
{
a_h[i] = (float)(rand()/RAND_MAX);
b_h[i] = (float)(rand()/RAND_MAX);
}
// device memory pointers
float *a_d = NULL;
float *b_d = NULL;
float *c_d = NULL;
// allocate device memory
hipMalloc((void**)&a_d, (N*N)*sizeof(float));
hipMalloc((void**)&b_d, (N*N)*sizeof(float));
hipMalloc((void**)&c_d, (N*N)*sizeof(float));
// copy input to the device
hipMemcpy(a_d, a_h, (N*N) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_d, b_h, (N*N) * sizeof(float), hipMemcpyHostToDevice);
// get start time
float cpuStart = omp_get_wtime();
printf("Measuring CPU execution time ...\n");
matrixMultiplyCPU(a_h, b_h, c_h, N);
//get end time
float cpuEnd = omp_get_wtime();
float cpuTime = (cpuEnd - cpuStart)* 1000;
// Cuda events to measure time
hipEvent_t start;
hipEvent_t stop;
float simpleKernelTime;
float optimisedKernelTime;
// start timer
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//launch simple kernel multiple times
printf("Measuring the execution time of the simple kernel ... \n");
hipLaunchKernelGGL(( matrixMultiplySimple), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, N);
//stop timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&simpleKernelTime, start, stop);
// start timer
hipEventRecord(start, 0);
//launch optimised kernel multiple times
printf("Measuring the execution time of the optimised kernel...\n");
hipLaunchKernelGGL(( matrixMultiplyOptimised), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, N);
//stop timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&optimisedKernelTime, start, stop);
//print out executtion times
printf("Naive CPU implementation time: %f ms\n", cpuTime);
printf("Naive GPU implementation time: %f ms\n", simpleKernelTime);
printf("Optimised GPU implementation time: %f ms\n", optimisedKernelTime);
// free device memory
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
// free host memory
free(a_h);
free(b_h);
return 0;
}
| 3a0c783bec760efa805837c1b16219806764f902.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <omp.h>
#define TILE_WIDTH 16
// a simple version of matrix multiplication which issues redundant loads from off-chip
// global memory
__global__ void matrixMultiplySimple(float *a, float *b, float *c, int width)
{
// Calculate the row and column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
// do dot product between row of a and column of b
for(int i=0; i<width; i++)
{
result += a[row*width+i] * b[i*width+col];
}
// write out thread's result
c[row*width+col] = result;
}
//an optimized version of matrix_multiplication which eliminates redundant loads
__global__ void matrixMultiplyOptimised(float *a, float *b, float*c, int width)
{
// create shorthand names for threadIdx & blockIdx
int tx = threadIdx.x;
int ty = threadIdx.y;
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
//calculate the row and column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
//loop over the tiles of the input in phases
for(int p=0; p<width/TILE_WIDTH; p++)
{
// collab load tiles into __shared__
s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)];
s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col];
// wait until all data loaded before allowing any thread in this block
// to continue
__syncthreads();
// do dot product between row of s_a and column of s_b
for(int i=0; i<TILE_WIDTH; i++)
{
result += s_a[ty][i] * s_b[i][tx];
}
// wait until all threads are finished before allowing any thread in this
// block to continue
__syncthreads();
}
// write out this thread's result
c[row*width + col] = result;
}
void matrixMultiplyCPU(float *a, float *b, float *c, int width)
{
float result;
for(int row=0; row<width; row++)
{
for(int col=0; col<width; col++)
{
result = 0;
for(int k=0; k<width; k++)
{
result += a[row*width + k] * b[k*width + col];
}
c[row*width + col] = result;
}
}
}
int main(void)
{
// the width of the matrix (not the number of total elements)
int N = 1024;
//grid and block size
dim3 block(TILE_WIDTH, TILE_WIDTH);
dim3 grid(N/block.x, N/block.y);
//host memory pointers
float *a_h = NULL;
float *b_h = NULL;
float *c_h = NULL;
//allocate host memory
size_t memSize = (N*N) * sizeof(float);
a_h = (float *) malloc(memSize);
b_h = (float *) malloc(memSize);
c_h = (float *) malloc(memSize);
// Generate random input
for(int i=0; i < N*N; ++i)
{
a_h[i] = (float)(rand()/RAND_MAX);
b_h[i] = (float)(rand()/RAND_MAX);
}
// device memory pointers
float *a_d = NULL;
float *b_d = NULL;
float *c_d = NULL;
// allocate device memory
cudaMalloc((void**)&a_d, (N*N)*sizeof(float));
cudaMalloc((void**)&b_d, (N*N)*sizeof(float));
cudaMalloc((void**)&c_d, (N*N)*sizeof(float));
// copy input to the device
cudaMemcpy(a_d, a_h, (N*N) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, (N*N) * sizeof(float), cudaMemcpyHostToDevice);
// get start time
float cpuStart = omp_get_wtime();
printf("Measuring CPU execution time ...\n");
matrixMultiplyCPU(a_h, b_h, c_h, N);
//get end time
float cpuEnd = omp_get_wtime();
float cpuTime = (cpuEnd - cpuStart)* 1000;
// Cuda events to measure time
cudaEvent_t start;
cudaEvent_t stop;
float simpleKernelTime;
float optimisedKernelTime;
// start timer
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//launch simple kernel multiple times
printf("Measuring the execution time of the simple kernel ... \n");
matrixMultiplySimple<<<grid, block>>>(a_d, b_d, c_d, N);
//stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&simpleKernelTime, start, stop);
// start timer
cudaEventRecord(start, 0);
//launch optimised kernel multiple times
printf("Measuring the execution time of the optimised kernel...\n");
matrixMultiplyOptimised<<<grid, block>>>(a_d, b_d, c_d, N);
//stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&optimisedKernelTime, start, stop);
//print out executtion times
printf("Naive CPU implementation time: %f ms\n", cpuTime);
printf("Naive GPU implementation time: %f ms\n", simpleKernelTime);
printf("Optimised GPU implementation time: %f ms\n", optimisedKernelTime);
// free device memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
// free host memory
free(a_h);
free(b_h);
return 0;
}
|
8ddb496cdb55cdd7a648629afa7c350009a7059d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <common.hpp>
#include <layers/interaction_layer.hpp>
#include <type_traits>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
template <uint x>
struct Log2 {
static constexpr uint value = 1 + Log2<x / 2>::value;
};
template <>
struct Log2<1> {
static constexpr uint value = 0;
};
struct __align__(8) half4 {
half2 vals[2];
};
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelNonAligned(
const __half *__restrict bottom_mlp_input, const __half *__restrict emb_input,
__half *__restrict output, uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding, uint smem_elems_per_warp,
uint smem_rows_per_warp, uint output_size, uint num_row_steps, uint num_col_steps) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
// const half *sample_input = input + num_rows * num_cols * sample_id;
const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
const half *sample_input = sample_bottom_mlp_input;
// for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (uint i = 0; i < num_rows; ++i) {
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
(shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
}
sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
gmem_output[idx] = shmem[idx];
}
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
nvcuda::wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
nvcuda::wmma::row_major>
a[M_BLOCKS];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
nvcuda::wmma::col_major>
b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
nvcuda::wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
nvcuda::wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
nvcuda::wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC,
nvcuda::wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] =
__float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id == 0) {
gmem_output[output_size - 1] = __float2half(0);
}
#else
#warning "dotBasedInteractFwdKernelNonAligned is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractFwdKernel(const __half *__restrict bottom_mlp_input,
const __half *__restrict emb_input, __half *__restrict output,
uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding,
uint smem_elems_per_warp, uint smem_rows_per_warp,
uint output_size, uint num_row_steps, uint num_col_steps) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
// const half *sample_input = input + num_rows * num_cols * sample_id;
const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
const half *sample_input = sample_bottom_mlp_input;
if (lane_id < (num_cols >> 2)) {
// for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (int i = 0; i < num_rows; ++i) {
((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id];
sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id];
}
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
nvcuda::wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
nvcuda::wmma::row_major>
a[M_BLOCKS];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
nvcuda::wmma::col_major>
b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
nvcuda::wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
nvcuda::wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
nvcuda::wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC,
nvcuda::wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] =
__float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id == 0) {
gmem_output[output_size - 1] = __float2half(0);
}
#else
#warning "dotBasedInteractFwdKernel is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractBwdKernelNonAligned(
const __half *__restrict upstream_grad, half __restrict *bottom_mlp_grad,
half __restrict *emb_grad, uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding, uint sample_size,
uint interaction_ugrad_size, uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems, uint interaction_ugrad_2D_stride, uint input_size_elems,
uint input_stride, uint num_row_steps, uint num_col_steps, uint row_tiles_per_step,
uint shared_mem_per_warp_size_byte) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
// uint gmem_input_sample_offset = sample_id * sample_size;
// const half *gmem_input = &input[gmem_input_sample_offset];
uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
// Interaction Gradient
// const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
// half *gmem_grad = &grad[gmem_grad_sample_offset];
half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
// Bottom MLP gradient
// half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
// const half *gmem_row_ptr = &gmem_input[row * num_cols];
const half *gmem_row_ptr =
(row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
smem_row_ptr[idx] = gmem_row_ptr[idx];
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
smem_row_ptr[idx] = __float2half(0);
}
}
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
for (uint idx = lane_id; idx < num_cols_after_padding; idx += THREADS_IN_WARP) {
smem_row_ptr[idx] = __float2half(0);
}
}
__syncwarp();
nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
nvcuda::wmma::row_major>
a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
nvcuda::wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
acc[ROW_TILES_PER_STEP];
nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
nvcuda::wmma::row_major>
b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
nvcuda::wmma::fill_fragment(acc[i], 0);
nvcuda::wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
nvcuda::wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, nvcuda::wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
if (gmem_grad_col < num_cols) {
for (uint i = 0; i < num_rows; i++) {
// gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) +
// lane_id]);
half *gmem_grad = (i == 0) ? gmem_bottom_mlp_grad : gmem_emb_grad;
uint idx = (i == 0) ? gmem_grad_col : ((i - 1) * num_cols + gmem_grad_col);
half val = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
gmem_grad[idx] = (i == 0) ? (val + gmem_ugrad[idx]) : val;
}
}
}
// for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
// gmem_mlp_grad[idx] = gmem_ugrad[idx];
// }
#else
#warning "dotBasedInteractBwdKernelNonAligned is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
// Backward pass of the dot-based interaction op (aligned / vectorized path).
// One warp processes one sample; requires SM70+ (WMMA tensor cores).
// Per-sample steps:
//   1. copy the flat upper-triangular upstream interaction gradient into
//      shared memory (float4 bulk copy plus a scalar tail),
//   2. expand it into a symmetric, zero-diagonal 2D matrix in smem_temp,
//   3. stage the forward inputs (bottom-MLP row + embedding rows) in smem_in,
//      zero-padding up to tile-aligned dimensions,
//   4. grad_input = ugrad_2D * input, computed 16x16-tile-wise with WMMA,
//   5. write results: row 0 (bottom MLP) also accumulates the pass-through
//      part of the upstream gradient; rows 1.. go to the embedding gradient.
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
          uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractBwdKernel(const __half *__restrict upstream_grad,
                                   half __restrict *bottom_mlp_grad, half __restrict *emb_grad,
                                   uint batch_size, uint num_rows, uint num_cols,
                                   uint num_rows_after_padding, uint num_cols_after_padding,
                                   uint sample_size, uint interaction_ugrad_size,
                                   uint interaction_ugrad_size_with_padding,
                                   uint interaction_ugrad_2D_size_elems,
                                   uint interaction_ugrad_2D_stride, uint input_size_elems,
                                   uint input_stride, uint num_row_steps, uint num_col_steps,
                                   uint row_tiles_per_step, uint shared_mem_per_warp_size_byte) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
  extern __shared__ half shared_mem[];
  uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
  uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
  // ">> 1" to convert to half pointer
  uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
  // Per-warp shared memory layout: [input staging | ugrad-2D matrix].
  // The fp32 output staging (smem_out) time-shares the ugrad-2D bytes.
  half *smem_in = &shared_mem[smem_warp_offset];
  half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
  float *smem_out = reinterpret_cast<float *>(smem_temp);
  // Global memory pointers for the current sample
  // Input
  // uint gmem_input_sample_offset = sample_id * sample_size;
  // const half *gmem_input = &input[gmem_input_sample_offset];
  uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
  uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
  const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
  const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
  // Interaction Gradient
  // const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
  // half *gmem_grad = &grad[gmem_grad_sample_offset];
  // NOTE(review): gradients overwrite the same buffers the forward inputs are
  // read from -- staging the inputs to shared memory below must happen first.
  half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
  half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
  // Bottom MLP gradient
  // half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
  // Upstream gradient vector
  uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
  const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
  // Upstream gradient vector for interactions
  const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
  // upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
  for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += THREADS_IN_WARP) {
    ((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
  }
  // scalar tail for the elements not covered by the float4 copy above
  uint offset = (interaction_ugrad_size >> 3) << 3;
  for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
    smem_in[idx] = gmem_ugrad_interactions[idx];
  }
  __syncwarp();
  // Form the 2D ugrad matrix.
  if (lane_id < num_rows_after_padding) {
    uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
    uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
    for (uint row = 0; row < num_rows; row++) {
      half ugrad_val = __float2half(0.0f);
      if (row < lane_id && lane_id < num_rows) {
        ugrad_val = smem_in[ugrad_flat_index + row];
        smem_temp[ugrad_offset_1 + row] = ugrad_val;
      }
      // mirror across the diagonal; the diagonal itself stays zero
      if (row <= lane_id && lane_id < num_rows_after_padding) {
        smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
      }
    }
    // zero the padding rows of the 2D ugrad matrix
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
    }
  }
  __syncwarp();
  // Input -> Shared Memory
  if (lane_id < (num_cols >> 2)) {
    for (uint row = 0; row < num_rows; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      // const half *gmem_row_ptr = &gmem_input[row * num_cols];
      const half *gmem_row_ptr =
          (row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
      ((float2 *)smem_row_ptr)[lane_id] = ((float2 *)gmem_row_ptr)[lane_id];
    }
  }
  // zero-pad the extra columns
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (uint row = 0; row < num_rows; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      smem_row_ptr[idx] = __float2half(0);
    }
  }
  // zero-pad the extra rows, 4 halfs per 64-bit store
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      ((half4 *)smem_row_ptr)[lane_id] = zeros;
    }
  }
  __syncwarp();
  // Load the ugrad-2D tiles once; they are reused for every column step.
  nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
                         nvcuda::wmma::row_major>
      a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
  for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
    for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
      const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
      nvcuda::wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
    }
  }
  nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
      acc[ROW_TILES_PER_STEP];
  nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
                         nvcuda::wmma::row_major>
      b[ROW_TILES_PER_STEP];
  for (int col_step = 0; col_step < num_col_steps; col_step++) {
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
      nvcuda::wmma::fill_fragment(acc[i], 0);
      nvcuda::wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
        nvcuda::wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
      }
    }
    // spill fp32 accumulators to shared memory before the scalar writeback
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
      nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, nvcuda::wmma::mem_row_major);
    }
    __syncwarp();
    uint gmem_grad_col_base = (col_step << TILE_DIM_LOG_2);
    uint gmem_grad_col = gmem_grad_col_base + lane_id;
    if (gmem_grad_col < num_cols) {
      // Row 0 is the bottom-MLP gradient, fused with the pass-through part of
      // the upstream gradient; __half2 stores => 8 lanes cover the 16 columns.
      if (lane_id < 8) {
        ((__half2 *)(gmem_bottom_mlp_grad + gmem_grad_col_base))[lane_id] =
            __hadd2(__float22half2_rn(((float2 *)smem_out)[lane_id]),
                    ((__half2 *)(gmem_ugrad + gmem_grad_col_base))[lane_id]);
      }
      // Rows 1..num_rows-1 are the embedding gradients.
      for (uint i = 0; i < num_rows - 1; i++) {
        half val = __float2half(smem_out[((i + 1) << TILE_DIM_LOG_2) + lane_id]);
        gmem_emb_grad[i * num_cols + gmem_grad_col] = val;
      }
    }
  }
#else
#warning "dotBasedInteractBwdKernel is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
// Host launcher for the forward dot-interaction kernels.
// Derives tile counts, padded sizes and the per-warp shared-memory footprint,
// then dispatches the vectorized kernel when both num_cols and output_size are
// multiples of 8 (128-bit float4 accesses), else the scalar fallback.
// NOTE(review): num_rows_after_padding is hard-coded to 32 (kTileDim << 1),
// so this path assumes num_rows <= 32 -- confirm against callers.
inline void dotBasedInteractFwd(const void *bottom_mlp_input, const void *emb_input, void *output,
                                uint batch_size, uint num_rows, uint num_cols,
                                hipStream_t stream) {
  const uint kWarpSize = 32;
  const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
  const uint kTileDim = 16;
  const uint kTileDimLog2 = Log2<kTileDim>::value;
  const uint warps_per_threadblock = 4;  // one warp per sample
  const uint threadblock_size = warps_per_threadblock * 32;
  const uint kPaddingSize = 1;  // single trailing pad element in the output
  const uint kRowTilesPerStep = 2;
  const uint kColTilesPerStep = 1;
  // num tiles
  uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
  uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
  // number of rows and columns after padding
  uint num_rows_after_padding = kTileDim << 1;
  uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
  uint num_row_steps = num_row_tiles / kRowTilesPerStep;
  uint num_col_steps = num_col_tiles / kColTilesPerStep;
  const uint K_BLOCKS = 8;
  const uint M_BLOCKS = 2;
  // skew padding avoids shared-memory bank conflicts on row accesses
  const uint SKEW_HALF = ((K_BLOCKS % 2) == 0) ? 8 : 0;
  const uint SMEM_STRIDE = (K_BLOCKS * 16 + SKEW_HALF);
  // multiple of 2 to guarantee 256-bit alignment for start of the row, at least 16 to safeload a
  // tile
  const uint smem_rows_per_warp = M_BLOCKS << 4;
  const uint smem_elems_per_warp_mat = smem_rows_per_warp * SMEM_STRIDE;
  const uint SKEW_HALF_ACC = ((M_BLOCKS % 2) == 0) ? 8 : 0;
  const uint SMEM_STRIDE_ACC = (M_BLOCKS * 16 + SKEW_HALF_ACC);
  const uint smem_elems_per_warp_acc = M_BLOCKS * 16 * SMEM_STRIDE_ACC * 2;  // output in FP32
  // the staging buffer is reused for both inputs and fp32 accumulators
  const uint smem_elems_per_warp = (smem_elems_per_warp_mat > smem_elems_per_warp_acc)
                                       ? smem_elems_per_warp_mat
                                       : smem_elems_per_warp_acc;
  uint output_size = num_cols + (num_rows * (num_rows - 1) >> 1) + kPaddingSize;
  bool float4_predicate = !((num_cols & 7) || (output_size & 7));
  if (float4_predicate) {
    hipLaunchKernelGGL(( dotBasedInteractFwdKernel<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS,
                              SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2, kTileDim,
                              kTileDimLog2>)
        , dim3((batch_size + warps_per_threadblock - 1) / warps_per_threadblock), dim3(threadblock_size),
        warps_per_threadblock * smem_elems_per_warp * sizeof(__half), stream,
        (const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output, batch_size,
        num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp,
        smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
  } else {
    hipLaunchKernelGGL(( dotBasedInteractFwdKernelNonAligned<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS,
                                        SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2,
                                        kTileDim, kTileDimLog2>)
        , dim3((batch_size + warps_per_threadblock - 1) / warps_per_threadblock), dim3(threadblock_size),
        warps_per_threadblock * smem_elems_per_warp * sizeof(__half), stream,
        (const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output, batch_size,
        num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp,
        smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
  }
}
// Host launcher for the backward dot-interaction kernels.
// Mirrors dotBasedInteractFwd: computes padded/tile-aligned geometry, the 2D
// ugrad staging size, and the per-warp shared-memory budget (input staging +
// max(fp32 output staging, ugrad-2D staging)), then dispatches the vectorized
// kernel when the padded interaction-ugrad length and num_cols are both
// multiples of 8.
// NOTE(review): num_rows_after_padding is hard-coded to 32 (kTileDim << 1),
// assuming num_rows <= 32 -- confirm against callers.
inline void dotBasedInteractBwd(void *upstream_grad, void *bottom_mlp_grad, void *emb_grad,
                                uint batch_size, uint num_rows, uint num_cols,
                                hipStream_t stream) {
  const uint kWarpSize = 32;
  const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
  const uint kTileDim = 16;
  const uint kTileDimLog2 = Log2<kTileDim>::value;
  const uint mem_skew_size = 8;  // bank-conflict skew for shared-memory rows
  const uint kPaddingSize = 1;
  const uint kWarpsPerBlock = 4;
  const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value;
  const uint kNumThreads = kWarpsPerBlock * kWarpSize;
  const uint kRowTilesPerStep = 2;
  const uint kColTilesPerStep = 1;
  uint row_tiles_per_step = num_rows > kTileDim ? kRowTilesPerStep : 1;
  // num tiles
  uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
  uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
  // number of rows and columns after padding
  uint num_rows_after_padding = kTileDim << 1;
  uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
  // 2D ugrad size and stride
  uint interaction_ugrad_2D_stride = num_rows_after_padding + mem_skew_size;
  uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride;
  uint interaction_ugrad_2D_size_bytes = interaction_ugrad_2D_size_elems * sizeof(half);
  // 1D ugrad size
  uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1;
  uint interaction_ugrad_size_with_padding = interaction_ugrad_size + kPaddingSize;
  // in_out place size and stride
  uint input_stride = num_cols_after_padding + mem_skew_size;
  uint input_size_elems = num_rows_after_padding * input_stride;
  uint input_size_bytes = input_size_elems * sizeof(half);
  // sample size
  uint sample_size = num_rows * num_cols;
  // output size
  uint output_size_elems = kTileDim * kTileDim * kRowTilesPerStep * kColTilesPerStep;
  uint output_size_bytes = output_size_elems * sizeof(float);
  // staging area size (time-shared between ugrad-2D and fp32 output)
  uint staging_area_size_bytes = output_size_bytes > interaction_ugrad_2D_size_bytes
                                     ? output_size_bytes
                                     : interaction_ugrad_2D_size_bytes;
  // Shared memory size
  uint shared_mem_per_warp_size_byte = input_size_bytes + staging_area_size_bytes;
  uint shared_mem_size_bytes = kWarpsPerBlock * shared_mem_per_warp_size_byte;
  uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2;
  uint num_row_steps = num_row_tiles / row_tiles_per_step;
  uint num_col_steps = num_col_tiles / kColTilesPerStep;
  bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7));
  if (float4_predicate) {
    hipLaunchKernelGGL(( dotBasedInteractBwdKernel<kWarpsPerBlock, kNumThreads, kRowTilesPerStep, kColTilesPerStep,
                              kWarpSize, kWarpSizeLog2, kTileDim, kTileDimLog2>)
        , dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, stream,
        (const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
        num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
        interaction_ugrad_size, interaction_ugrad_size_with_padding,
        interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
        input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
        shared_mem_per_warp_size_byte);
  } else {
    hipLaunchKernelGGL(( dotBasedInteractBwdKernelNonAligned<kWarpsPerBlock, kNumThreads, kRowTilesPerStep,
                                        kColTilesPerStep, kWarpSize, kWarpSizeLog2, kTileDim,
                                        kTileDimLog2>)
        , dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, stream,
        (const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
        num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
        interaction_ugrad_size, interaction_ugrad_size_with_padding,
        interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
        input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
        shared_mem_per_warp_size_byte);
  }
}
// Concatenates the bottom-MLP row and the n_emb embedding rows of every
// sample into one wide row of `out` (forward == true), or scatters the
// gradient in `out` back to the inputs (forward == false). In the backward
// direction the bottom-MLP input accumulates (+=) while embedding inputs are
// overwritten. Launch: blockIdx.x selects the input slot (0 = MLP,
// 1..n_emb = embeddings), blockIdx.y strides over the h samples.
template <typename T>
__global__ void concat_kernel(bool forward, T *out, T *in_mlp, T *in_emb, const int h,
                              const int out_w, const int in_w, const int n_emb) {
  const int slot = blockIdx.x;
  if (slot >= 1 + n_emb) {
    return;
  }
  const bool is_mlp = (slot == 0);
  T *src = is_mlp ? in_mlp : in_emb + (slot - 1) * in_w;
  for (int row = blockIdx.y; row < h; row += gridDim.y) {
    // embeddings are laid out (h, n_emb, in_w); the MLP input is (h, in_w)
    const int src_row_base = is_mlp ? row * in_w : row * in_w * n_emb;
    const int dst_row_base = row * out_w + slot * in_w;
    for (int col = threadIdx.x; col < in_w; col += blockDim.x) {
      const int src_idx = src_row_base + col;
      const int dst_idx = dst_row_base + col;
      if (forward) {
        out[dst_idx] = src[src_idx];
      } else {
        // backward: the MLP grad accumulates its pass-through contribution
        src[src_idx] = is_mlp ? (src[src_idx] + out[dst_idx]) : out[dst_idx];
      }
    }
  }
}
// Builds the final interaction output for one sample per block iteration:
// first the w bottom-MLP elements, then the strict upper triangle of the
// n_ins x n_ins dot-product matrix `mat`, gathered via shared memory.
// The trailing padding element of each output row is left untouched here.
// Requires dynamic shared memory of (n_ins*(n_ins+1)/2 - n_ins) * sizeof(T).
template <typename T>
__global__ void gather_concat_fprop_kernel(T *out, const T *in0, const T *mat, const int h,
                                           const int n_ins, const int w) {
  extern __shared__ T s_buf[];
  for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
    int g_in_idx_base = bid * n_ins * n_ins;
    // gather the strict upper triangle into a flat shared buffer
    for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
      for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
        if (col > row) {
          int idx_in_blk = row * n_ins + col;
          int g_in_idx = g_in_idx_base + idx_in_blk;
          // column-major triangular flattening (matches the bprop scatter)
          int s_idx = (col * (col - 1) / 2) + row;
          s_buf[s_idx] = mat[g_in_idx];
        }
      }
    }
    __syncthreads();
    int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
    // out_len includes the single trailing padding element (not written here)
    int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
    int g_out_idx_base = bid * out_len;
    for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
      int g_out_idx = g_out_idx_base + tid;
      T value = (tid < w) ? in0[bid * w + tid] : s_buf[tid - w];
      out[g_out_idx] = value;
    }
    // protect s_buf before the next sample iteration reuses it
    __syncthreads();
  }
}
// Computes dst = src + src^T for each of the h (n_ins x n_ins) matrices,
// staging one block tile in shared memory.
// Requires dynamic shared memory of blockDim.x * blockDim.y * sizeof(T).
// NOTE(review): the transposed read (sid_t) swaps threads only WITHIN one
// block tile, so the result is the true transpose only when the whole
// n_ins x n_ins matrix fits in a single (blockIdx.x, blockIdx.y) tile; the
// caller in bprop() also passes src == dst (in-place), which is only safe
// under the same single-tile assumption -- confirm n_ins <= blockDim.x/y.
template <typename T>
__global__ void transpose_and_add(const T *src, T *dst, const int h, const int n_ins) {
  extern __shared__ T s_buf[];
  for (int bid = blockIdx.z; bid < h; bid += gridDim.z) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int gid = bid * n_ins * n_ins + y * n_ins + x;
    int sid_n = threadIdx.y * blockDim.x + threadIdx.x;  // natural layout
    int sid_t = threadIdx.x * blockDim.y + threadIdx.y;  // transposed layout
    if (x < n_ins && y < n_ins) {
      s_buf[sid_n] = src[gid];
    }
    __syncthreads();
    if (x < n_ins && y < n_ins) {
      dst[gid] = s_buf[sid_n] + s_buf[sid_t];
    }
    // protect s_buf before the next sample iteration reuses it
    __syncthreads();
  }
}
// Inverse of gather_concat_fprop_kernel: splits the upstream gradient of one
// sample into the bottom-MLP part (first w elements -> in0) and the
// interaction part, which is scattered back into the strict upper triangle of
// the n_ins x n_ins matrix `mat`; the diagonal and lower triangle are zeroed.
// Requires dynamic shared memory of (n_ins*(n_ins+1)/2 - n_ins) * sizeof(T).
template <typename T>
__global__ void gather_concat_bprop_kernel(const T *out, T *in0, T *mat, const int h,
                                           const int n_ins, const int w) {
  extern __shared__ T s_buf[];
  for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
    int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
    // out_len includes the single trailing padding element (ignored here)
    int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
    int g_out_idx_base = bid * out_len;
    for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
      int g_out_idx = g_out_idx_base + tid;
      T val = out[g_out_idx];
      if (tid < w) {
        in0[bid * w + tid] = val;
      } else {
        s_buf[tid - w] = val;
      }
    }
    __syncthreads();
    int g_in_idx_base = bid * n_ins * n_ins;
    for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
      for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
        int idx_in_blk = row * n_ins + col;
        int g_in_idx = g_in_idx_base + idx_in_blk;
        // column-major triangular flattening, matching the fprop gather
        int s_idx = (col * (col - 1) / 2) + row;
        mat[g_in_idx] = (col > row) ? s_buf[s_idx] : T(0);
      }
    }
    // protect s_buf before the next sample iteration reuses it
    __syncthreads();
  }
}
} // anonymous namespace
// Validates input shapes and reserves every buffer the layer needs.
// Inputs: bottom-MLP activations (batch, width) and embeddings
// (batch, n_emb, width); batch size and width must match.
// The FP32 path reserves three scratch tensors (concat buffer, the
// n_ins x n_ins interaction matrix, and a concat-sized temp for bprop);
// the __half path uses the fused kernels and needs none of them.
// Output shape: (batch, width + n_ins*(n_ins-1)/2 + 1) -- the +1 is padding.
template <typename T>
InteractionLayer<T>::InteractionLayer(
    const Tensor2<T> &in_bottom_mlp_tensor, const Tensor2<T> &in_embeddings, Tensor2<T> &out_tensor,
    const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &blobs_buff,
    const std::shared_ptr<GPUResource> &gpu_resource, bool use_mixed_precision,
    bool enable_tf32_compute)
    : Layer(gpu_resource),
      use_mixed_precision_(use_mixed_precision),
      enable_tf32_compute_(enable_tf32_compute) {
  try {
    auto first_in_dims = in_bottom_mlp_tensor.get_dimensions();
    auto second_in_dims = in_embeddings.get_dimensions();
    if (first_in_dims.size() != 2) {
      CK_THROW_(Error_t::WrongInput, "Input Bottom MLP must be a 2D tensor");
    }
    if (second_in_dims.size() != 3) {
      CK_THROW_(Error_t::WrongInput, "Input Embeddings must be a 3D tensor");
    }
    if (first_in_dims[0] != second_in_dims[0]) {
      CK_THROW_(Error_t::WrongInput, "the input tensors' batch sizes must be the same");
    }
    if (first_in_dims[1] != second_in_dims[2]) {
      CK_THROW_(Error_t::WrongInput, "the input tensors' widths must be the same");
    }
    size_t n_ins = 1 + second_in_dims[1];
    // Scratch tensors are needed only by the generic (non-__half) path.
    if (std::is_same<T, __half>::value == false) {
      size_t concat_dims_width = first_in_dims[1] + second_in_dims[1] * second_in_dims[2];
      std::vector<size_t> concat_dims = {first_in_dims[0], concat_dims_width};
      {
        // internal_tensors_[0]: concatenated inputs (fprop phase 0)
        Tensor2<T> tensor;
        blobs_buff->reserve(concat_dims, &tensor);
        internal_tensors_.push_back(tensor);
      }
      {
        // internal_tensors_[1]: per-sample n_ins x n_ins interaction matrix
        std::vector<size_t> mat_dims = {first_in_dims[0], n_ins * n_ins};
        Tensor2<T> tensor;
        blobs_buff->reserve(mat_dims, &tensor);
        internal_tensors_.push_back(tensor);
      }
      {
        // internal_tensors_[2]: concat-sized temp for the backward GEMM
        Tensor2<T> tensor;
        blobs_buff->reserve(concat_dims, &tensor);
        internal_tensors_.push_back(tensor);
      }
    }
    // strict upper triangle of the n_ins x n_ins matrix, plus 1 pad element
    int concat_len = n_ins * (n_ins + 1) / 2 - n_ins;
    std::vector<size_t> out_dims = {first_in_dims[0], first_in_dims[1] + concat_len + 1};
    blobs_buff->reserve(out_dims, &out_tensor);
    in_tensors_.push_back(in_bottom_mlp_tensor);
    in_tensors_.push_back(in_embeddings);
    out_tensors_.push_back(out_tensor);
  } catch (const std::runtime_error &rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Defaulted out-of-line destructor (kept out-of-line so it is emitted with
// the explicit instantiations at the end of this file). The original
// `{};` form carried a stray empty-declaration semicolon.
template <typename T>
InteractionLayer<T>::~InteractionLayer() = default;
// Generic (FP32) forward pass in three phases:
//   0. concat the bottom-MLP row and embedding rows into one wide row,
//   1. mat = concat * concat^T per sample via one strided-batched GEMM,
//   2. gather [bottom-MLP row | upper triangle of mat | pad] into the output.
template <typename T>
void InteractionLayer<T>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  // phase 0: concat
  T *concat = internal_tensors_[0].get_ptr();
  T *in_mlp = get_in_tensors(is_train)[0].get_ptr();
  T *in_emb = get_in_tensors(is_train)[1].get_ptr();
  const int h = internal_tensors_[0].get_dimensions()[0];
  const int out_w = internal_tensors_[0].get_dimensions()[1];
  const int in_w = get_in_tensors(is_train)[0].get_dimensions()[1];
  const int n_emb = get_in_tensors(is_train)[1].get_dimensions()[1];
  const int n_ins = 1 + n_emb;
  // one block column per input slot; block size scales with the row width
  dim3 grid0(n_ins, get_gpu().get_sm_count(), 1);
  dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
  hipLaunchKernelGGL(( concat_kernel), dim3(grid0), dim3(block0), 0, get_gpu().get_stream(), true, concat, in_mlp, in_emb, h,
                     out_w, in_w, n_emb);
  // phase 1: matmul
  const int batch_count = h;
  T *mat = internal_tensors_[1].get_ptr();
  const int m = n_ins;
  const int n = n_ins;
  const int k = in_w;
  // scalars are float to match the 32F compute type below
  float alpha = 1.0f;
  float beta = 0.0f;
  long long int stride_a = static_cast<long long int>(n) * k;
  long long int stride_b = static_cast<long long int>(k) * m;
  long long int stride_c = static_cast<long long int>(n) * m;
  hipDataType a_type = HIP_R_32F;
  hipDataType b_type = HIP_R_32F;
  hipDataType c_type = HIP_R_32F;
  hipblasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  hipblasGemmAlgo_t algo =
      use_mixed_precision_ ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : HIPBLAS_GEMM_DEFAULT;
  // mat = concat^T-style batched product: one n_ins x n_ins matrix per sample
  CK_CUBLAS_THROW_(
      hipblasGemmStridedBatchedEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k,
                                  &alpha, concat, a_type, k, stride_a, concat, b_type, k, stride_b,
                                  &beta, mat, c_type, n, stride_c, batch_count, compute_type, algo));
  // phase 2: gather & concat
  T *in0 = get_in_tensors(is_train)[0].get_ptr();
  T *gather = out_tensors_[0].get_ptr();
  dim3 grid1(get_gpu().get_sm_count() * 8, 1, 1);
  dim3 block1(16, 16, 1);
  size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
  hipLaunchKernelGGL(( gather_concat_fprop_kernel), dim3(grid1), dim3(block1), smem_size, get_gpu().get_stream(), gather, in0, mat,
                     h, n_ins, in_w);
#ifndef NDEBUG
  CK_CUDA_THROW_(hipDeviceSynchronize());
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
// Mixed-precision forward pass: a single fused dot-interaction kernel is
// launched, so none of the FP32 path's intermediate tensors are needed.
template <>
void InteractionLayer<__half>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  __half *mlp_ptr = get_in_tensors(is_train)[0].get_ptr();
  __half *emb_ptr = get_in_tensors(is_train)[1].get_ptr();
  __half *out_ptr = out_tensors_[0].get_ptr();
  const int batch = get_in_tensors(is_train)[0].get_dimensions()[0];
  const int width = get_in_tensors(is_train)[0].get_dimensions()[1];
  const int num_emb = get_in_tensors(is_train)[1].get_dimensions()[1];
  // rows of the interaction = 1 bottom-MLP row + num_emb embedding rows
  dotBasedInteractFwd(mlp_ptr, emb_ptr, out_ptr, batch, 1 + num_emb, width,
                      get_gpu().get_stream());
#ifndef NDEBUG
  CK_CUDA_THROW_(hipDeviceSynchronize());
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
// Generic (FP32) backward pass in three phases:
//   0. scatter the upstream gradient back into the bottom-MLP part and the
//      staged interaction matrix `mat` (inverse of the fprop gather),
//   1. d(concat) = (mat + mat^T) * concat via one strided-batched GEMM,
//   2. split d(concat) back into the bottom-MLP and embedding gradients
//      (concat_kernel in backward mode accumulates into the MLP grad).
// Fix vs. previous revision: the GEMM scalars were declared as T; cuBLAS
// requires the host alpha/beta type to match the compute type (32F here),
// so they are now float -- consistent with fprop(). Behavior is unchanged
// for the only instantiation that reaches this code (T == float).
template <typename T>
void InteractionLayer<T>::bprop() {
  CudaDeviceContext context(get_device_id());
  // phase 0:
  T *gather = out_tensors_[0].get_ptr();
  T *in0 = get_in_tensors(true)[0].get_ptr();
  T *mat = internal_tensors_[1].get_ptr();
  const int h = internal_tensors_[0].get_dimensions()[0];
  const int n_ins = 1 + get_in_tensors(true)[1].get_dimensions()[1];
  const int in_w = get_in_tensors(true)[0].get_dimensions()[1];
  dim3 grid1(get_gpu().get_sm_count() * 8, 1, 1);
  dim3 block1(16, 16, 1);
  size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
  hipLaunchKernelGGL(( gather_concat_bprop_kernel), dim3(grid1), dim3(block1), smem_size, get_gpu().get_stream(), gather, in0, mat,
                     h, n_ins, in_w);
  // phase 1:
  const int batch_count = h;
  T *concat = internal_tensors_[0].get_ptr();
  T *concat_tmp = internal_tensors_[2].get_ptr();
  const int m = n_ins;
  const int n = in_w;
  const int k = n_ins;
  // host scalars must be float to match CUBLAS_COMPUTE_32F (was T before)
  float alpha = 1.0f;
  float beta = 0.0f;
  long long int stride_a = static_cast<long long int>(n) * k;
  long long int stride_b = static_cast<long long int>(k) * m;
  long long int stride_c = static_cast<long long int>(n) * m;
  hipDataType a_type = HIP_R_32F;
  hipDataType b_type = HIP_R_32F;
  hipDataType c_type = HIP_R_32F;
  hipblasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  hipblasGemmAlgo_t algo =
      use_mixed_precision_ ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : HIPBLAS_GEMM_DEFAULT;
  // mat = mat + T(mat)  (in place; assumes n_ins fits in one 32x32 tile)
  {
    dim3 block(32, 32, 1);
    dim3 grid((n_ins + block.x - 1) / block.x, (n_ins + block.y - 1) / block.y, h);
    size_t smem_size_tr = sizeof(T) * block.x * block.y;  // renamed: no shadowing
    hipLaunchKernelGGL(( transpose_and_add), dim3(grid), dim3(block), smem_size_tr, get_gpu().get_stream(), mat, mat, h, n_ins);
  }
  CK_CUBLAS_THROW_(hipblasGemmStridedBatchedEx(
      get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, concat, a_type, n,
      stride_a, mat, b_type, k, stride_b, &beta, concat_tmp, c_type, n, stride_c, batch_count,
      compute_type, algo));
  // phase 2:
  T *in_mlp = get_in_tensors(true)[0].get_ptr();
  T *in_emb = get_in_tensors(true)[1].get_ptr();
  const int out_w = internal_tensors_[0].get_dimensions()[1];
  const int n_emb = get_in_tensors(true)[1].get_dimensions()[1];
  dim3 grid0(n_ins, get_gpu().get_sm_count(), 1);
  dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
  hipLaunchKernelGGL(( concat_kernel), dim3(grid0), dim3(block0), 0, get_gpu().get_stream(), false, concat_tmp, in_mlp, in_emb, h,
                     out_w, in_w, n_emb);
#ifndef NDEBUG
  CK_CUDA_THROW_(hipDeviceSynchronize());
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
// Mixed-precision backward pass: one fused kernel computes both the
// bottom-MLP gradient (including its pass-through upstream part) and the
// embedding gradients.
template <>
void InteractionLayer<__half>::bprop() {
  CudaDeviceContext context(get_device_id());
  __half *up_grad = out_tensors_[0].get_ptr();
  // the input tensors' buffers are reused as the gradient outputs
  __half *mlp_grad = get_in_tensors(true)[0].get_ptr();
  __half *emb_grad = get_in_tensors(true)[1].get_ptr();
  const int h = get_in_tensors(true)[0].get_dimensions()[0];
  const int n_emb = get_in_tensors(true)[1].get_dimensions()[1];
  const int n_ins = 1 + n_emb;
  const int in_w = get_in_tensors(true)[0].get_dimensions()[1];
  dotBasedInteractBwd(up_grad, mlp_grad, emb_grad, h, n_ins, in_w, get_gpu().get_stream());
#ifndef NDEBUG
  CK_CUDA_THROW_(hipDeviceSynchronize());
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
// Explicit instantiations: the layer is used with FP32 and FP16 activations.
template class InteractionLayer<float>;
template class InteractionLayer<__half>;
} // namespace HugeCTR
| 8ddb496cdb55cdd7a648629afa7c350009a7059d.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <common.hpp>
#include <layers/interaction_layer.hpp>
#include <type_traits>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
// Compile-time floor(log2(x)) for power-of-two template arguments,
// evaluated by peeling one bit per recursion step.
template <uint x>
struct Log2 {
  static constexpr uint value = Log2<(x >> 1)>::value + 1;
};
// Recursion anchor: log2(1) == 0.
template <>
struct Log2<1> {
  static constexpr uint value = 0;
};
// Four packed halfs (two half2's), 8-byte aligned so one thread can move
// four halfs with a single 64-bit load/store.
struct __align__(8) half4 {
  half2 vals[2];
};
// Forward dot-interaction kernel, non-aligned (scalar-copy) fallback.
// One warp per sample: stages the bottom-MLP row followed by the embedding
// rows in shared memory (zero-padded to tile-aligned dims), computes
// input * input^T with WMMA tiles, and emits
// [bottom_mlp_row | flattened strict lower triangle | one zero pad element].
// Requires SM70+ and per-warp dynamic shared memory of smem_elems_per_warp
// halfs.
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
          uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
          uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelNonAligned(
    const __half *__restrict bottom_mlp_input, const __half *__restrict emb_input,
    __half *__restrict output, uint batch_size, uint num_rows, uint num_cols,
    uint num_rows_after_padding, uint num_cols_after_padding, uint smem_elems_per_warp,
    uint smem_rows_per_warp, uint output_size, uint num_row_steps, uint num_col_steps) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
  uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
  int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
  extern __shared__ half shmem_dynamic[];
  half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
  // const half *sample_input = input + num_rows * num_cols * sample_id;
  const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
  const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
  const half *sample_input = sample_bottom_mlp_input;
  // row 0 comes from the bottom MLP, rows 1.. from the embeddings
  // for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
  for (uint i = 0; i < num_rows; ++i) {
    for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
      (shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
    }
    sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
  }
  // zero-pad the extra columns ...
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (int i = 0; i < num_rows; ++i) {
      (shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
    }
  }
  // ... and the extra rows, 4 halfs per 64-bit store
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
    for (int i = num_rows; i < num_rows_after_padding; i++) {
      ((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
    }
  }
  __syncwarp();
  // pass-through: copy the bottom-MLP row to the head of the output
  half *gmem_output = output + output_size * sample_id;
  for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
    gmem_output[idx] = shmem[idx];
  }
  // acc = input * input^T, tiled; the same smem tile is loaded as A and B^T
  nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
      acc[M_BLOCKS][M_BLOCKS];
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      nvcuda::wmma::fill_fragment(acc[i][j], 0);
    }
  }
  for (int k_step = 0; k_step < num_col_steps; k_step++) {
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
                           nvcuda::wmma::row_major>
        a[M_BLOCKS];
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
                           nvcuda::wmma::col_major>
        b[M_BLOCKS];
    for (int j = 0; j < M_BLOCKS; j++) {
      // the last row block is anchored to the buffer end, so blocks may overlap
      int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
      const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
      nvcuda::wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
      nvcuda::wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
    }
    for (int i = 0; i < M_BLOCKS; i++) {
      for (int j = 0; j < M_BLOCKS; j++) {
        nvcuda::wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
      }
    }
  }
  // spill fp32 accumulators to shared memory, then gather the strict lower
  // triangle (row-flattened) into the output
  float *shmem_store = reinterpret_cast<float *>(shmem);
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
      nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC,
                                      nvcuda::wmma::mem_row_major);
    }
  }
  half *gmem_interact_output = gmem_output + num_cols;
  int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
  int srcLine = 0;
  for (int i = 0; i < num_rows; ++i, ++srcLine) {
    // skip back over the overlap introduced by the anchored last row block
    if (i == ((M_BLOCKS - 1) * 16)) {
      srcLine += lastRowBlockOffset;
    }
    if (lane_id < i) {
      uint offset = (i * (i - 1)) >> 1;
      gmem_interact_output[offset + lane_id] =
          __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
    }
  }
  // Padding
  if (lane_id == 0) {
    gmem_output[output_size - 1] = __float2half(0);
  }
#else
#warning "dotBasedInteractFwdKernelNonAligned is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
// Forward dot-interaction kernel, vectorized path (num_cols and output_size
// are multiples of 8, so float2/float4-width accesses are safe).
// Identical algorithm to the NonAligned variant: one warp per sample, stage
// the bottom-MLP row + embedding rows in shared memory, compute
// input * input^T with WMMA, and emit
// [bottom_mlp_row | flattened strict lower triangle | one zero pad element].
// Requires SM70+.
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
          uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
          uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractFwdKernel(const __half *__restrict bottom_mlp_input,
                                   const __half *__restrict emb_input, __half *__restrict output,
                                   uint batch_size, uint num_rows, uint num_cols,
                                   uint num_rows_after_padding, uint num_cols_after_padding,
                                   uint smem_elems_per_warp, uint smem_rows_per_warp,
                                   uint output_size, uint num_row_steps, uint num_col_steps) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
  uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
  int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
  extern __shared__ half shmem_dynamic[];
  half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
  // const half *sample_input = input + num_rows * num_cols * sample_id;
  const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
  const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
  const half *sample_input = sample_bottom_mlp_input;
  // float2 == 4 halfs per lane; row 0 is the bottom MLP, rows 1.. embeddings
  if (lane_id < (num_cols >> 2)) {
    // for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
    for (int i = 0; i < num_rows; ++i) {
      ((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id];
      sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
    }
  }
  // zero-pad the extra columns ...
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (int i = 0; i < num_rows; ++i) {
      (shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
    }
  }
  // ... and the extra rows, 4 halfs per 64-bit store
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
    for (int i = num_rows; i < num_rows_after_padding; i++) {
      ((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
    }
  }
  __syncwarp();
  // pass-through: copy the bottom-MLP row to the head of the output
  half *gmem_output = output + output_size * sample_id;
  if (lane_id < (num_cols >> 2)) {
    ((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id];
  }
  // acc = input * input^T, tiled; the same smem tile is loaded as A and B^T
  nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
      acc[M_BLOCKS][M_BLOCKS];
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      nvcuda::wmma::fill_fragment(acc[i][j], 0);
    }
  }
  for (int k_step = 0; k_step < num_col_steps; k_step++) {
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
                           nvcuda::wmma::row_major>
        a[M_BLOCKS];
    nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
                           nvcuda::wmma::col_major>
        b[M_BLOCKS];
    for (int j = 0; j < M_BLOCKS; j++) {
      // the last row block is anchored to the buffer end, so blocks may overlap
      int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
      const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
      nvcuda::wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
      nvcuda::wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
    }
    for (int i = 0; i < M_BLOCKS; i++) {
      for (int j = 0; j < M_BLOCKS; j++) {
        nvcuda::wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
      }
    }
  }
  // spill fp32 accumulators to shared memory, then gather the strict lower
  // triangle (row-flattened) into the output
  float *shmem_store = reinterpret_cast<float *>(shmem);
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
      nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC,
                                      nvcuda::wmma::mem_row_major);
    }
  }
  half *gmem_interact_output = gmem_output + num_cols;
  int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
  int srcLine = 0;
  for (int i = 0; i < num_rows; ++i, ++srcLine) {
    // skip back over the overlap introduced by the anchored last row block
    if (i == ((M_BLOCKS - 1) * 16)) {
      srcLine += lastRowBlockOffset;
    }
    if (lane_id < i) {
      uint offset = (i * (i - 1)) >> 1;
      gmem_interact_output[offset + lane_id] =
          __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
    }
  }
  // Padding
  if (lane_id == 0) {
    gmem_output[output_size - 1] = __float2half(0);
  }
#else
#warning "dotBasedInteractFwdKernel is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
// Backward kernel of the DLRM dot-based interaction layer — scalar fallback
// used by dotBasedInteractBwd when num_cols / the padded ugrad size are not
// multiples of 8 halves (no float4 vectorization possible).
// One warp processes one sample. Per-warp dynamic shared memory layout:
//   smem_in   [0, input_size_elems)   : zero-padded input matrix; briefly also
//                                       holds the flat upstream interaction grad
//   smem_temp [input_size_elems, ...) : expanded 2D (symmetric) upstream-grad
//                                       matrix, later aliased as fp32 wmma
//                                       output tiles (smem_out)
// The gradients are written in place over the forward inputs
// (bottom_mlp_grad / emb_grad double as input pointers).
// requires SM70+ (nvcuda::wmma).
// NOTE(review): `half __restrict *` puts the qualifier before the `*`; nvcc
// accepts it, but the conventional spelling is `half *__restrict` — confirm.
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
          uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractBwdKernelNonAligned(
    const __half *__restrict upstream_grad, half __restrict *bottom_mlp_grad,
    half __restrict *emb_grad, uint batch_size, uint num_rows, uint num_cols,
    uint num_rows_after_padding, uint num_cols_after_padding, uint sample_size,
    uint interaction_ugrad_size, uint interaction_ugrad_size_with_padding,
    uint interaction_ugrad_2D_size_elems, uint interaction_ugrad_2D_stride, uint input_size_elems,
    uint input_stride, uint num_row_steps, uint num_col_steps, uint row_tiles_per_step,
    uint shared_mem_per_warp_size_byte) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
  extern __shared__ half shared_mem[];
  uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
  uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
  // ">> 1" converts the per-warp byte offset into a half-element offset.
  uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
  half *smem_in = &shared_mem[smem_warp_offset];
  half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
  float *smem_out = reinterpret_cast<float *>(smem_temp);
  // Global memory pointers for the current sample
  // Input
  // uint gmem_input_sample_offset = sample_id * sample_size;
  // const half *gmem_input = &input[gmem_input_sample_offset];
  uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
  uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
  const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
  const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
  // Interaction Gradient
  // const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
  // half *gmem_grad = &grad[gmem_grad_sample_offset];
  half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
  half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
  // Bottom MLP gradient
  // half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
  // Upstream gradient vector: [num_cols MLP grads | padded interaction grads]
  uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
  const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
  // Upstream gradient vector for interactions
  const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
  // upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
  for (uint idx = lane_id; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
    smem_in[idx] = gmem_ugrad_interactions[idx];
  }
  __syncwarp();
  // Form the 2D ugrad matrix: the flat grad stores pair (i, j), j < i, at
  // index i*(i-1)/2 + j; expand it into a full symmetric matrix (zero
  // diagonal) and zero the padding rows, so a single GEMM handles both
  // "row as left operand" and "row as right operand" contributions.
  if (lane_id < num_rows_after_padding) {
    uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
    uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
    for (uint row = 0; row < num_rows; row++) {
      half ugrad_val = __float2half(0.0f);
      if (row < lane_id && lane_id < num_rows) {
        ugrad_val = smem_in[ugrad_flat_index + row];
        smem_temp[ugrad_offset_1 + row] = ugrad_val;
      }
      if (row <= lane_id && lane_id < num_rows_after_padding) {
        smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
      }
    }
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
    }
  }
  __syncwarp();
  // Input -> Shared Memory (row 0 comes from the bottom-MLP tensor, the rest
  // from the embedding tensor); pad the extra columns with zeros.
  for (uint row = 0; row < num_rows; row++) {
    half *smem_row_ptr = &smem_in[row * input_stride];
    // const half *gmem_row_ptr = &gmem_input[row * num_cols];
    const half *gmem_row_ptr =
        (row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
    for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
      smem_row_ptr[idx] = gmem_row_ptr[idx];
    }
    uint idx = lane_id + num_cols;
    if (idx < num_cols_after_padding) {
      smem_row_ptr[idx] = __float2half(0);
    }
  }
  // Zero-fill the padding rows so the wmma tiles read defined data.
#pragma unroll 2
  for (uint row = num_rows; row < num_rows_after_padding; row++) {
    half *smem_row_ptr = &smem_in[row * input_stride];
    for (uint idx = lane_id; idx < num_cols_after_padding; idx += THREADS_IN_WARP) {
      smem_row_ptr[idx] = __float2half(0);
    }
  }
  __syncwarp();
  // a = tiles of the 2D ugrad matrix (loaded once, reused for every column
  // step); b = tiles of the input matrix; acc = ugrad2D * input, i.e. the
  // gradient w.r.t. the input rows, one TILE_DIM-wide column slab per step.
  nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
                         nvcuda::wmma::row_major>
      a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
  for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
    for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
      const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
      nvcuda::wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
    }
  }
  nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
      acc[ROW_TILES_PER_STEP];
  nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
                         nvcuda::wmma::row_major>
      b[ROW_TILES_PER_STEP];
  for (int col_step = 0; col_step < num_col_steps; col_step++) {
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
      nvcuda::wmma::fill_fragment(acc[i], 0);
      nvcuda::wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
        nvcuda::wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
      }
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
      nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, nvcuda::wmma::mem_row_major);
    }
    __syncwarp();
    // Write this column slab back to global memory; row 0 (bottom MLP)
    // additionally accumulates the bottom-MLP part of the upstream grad.
    uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
    if (gmem_grad_col < num_cols) {
      for (uint i = 0; i < num_rows; i++) {
        // gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) +
        // lane_id]);
        half *gmem_grad = (i == 0) ? gmem_bottom_mlp_grad : gmem_emb_grad;
        uint idx = (i == 0) ? gmem_grad_col : ((i - 1) * num_cols + gmem_grad_col);
        half val = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
        gmem_grad[idx] = (i == 0) ? (val + gmem_ugrad[idx]) : val;
      }
    }
  }
  // for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
  //   gmem_mlp_grad[idx] = gmem_ugrad[idx];
  // }
#else
#warning "dotBasedInteractBwdKernelNonAligned is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
// Vectorized backward kernel of the DLRM dot-based interaction layer. Same
// algorithm as dotBasedInteractBwdKernelNonAligned, but bulk loads/stores use
// float4 / half4 / __half2, which requires num_cols and
// interaction_ugrad_size_with_padding to be multiples of 8 halves (the
// host-side launcher checks this before choosing this kernel).
// One warp processes one sample; per-warp shared memory holds the padded
// input matrix (smem_in) followed by a staging area reused for the 2D ugrad
// matrix (smem_temp) and the fp32 wmma output tiles (smem_out).
// Gradients are written in place over the forward inputs.
// requires SM70+ (nvcuda::wmma).
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
          uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractBwdKernel(const __half *__restrict upstream_grad,
                                   half __restrict *bottom_mlp_grad, half __restrict *emb_grad,
                                   uint batch_size, uint num_rows, uint num_cols,
                                   uint num_rows_after_padding, uint num_cols_after_padding,
                                   uint sample_size, uint interaction_ugrad_size,
                                   uint interaction_ugrad_size_with_padding,
                                   uint interaction_ugrad_2D_size_elems,
                                   uint interaction_ugrad_2D_stride, uint input_size_elems,
                                   uint input_stride, uint num_row_steps, uint num_col_steps,
                                   uint row_tiles_per_step, uint shared_mem_per_warp_size_byte) {
#if __CUDA_ARCH__ >= 700 || !defined(__CUDA_ARCH__)
  extern __shared__ half shared_mem[];
  uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
  uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
  // ">> 1" converts the per-warp byte offset into a half-element offset.
  uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
  half *smem_in = &shared_mem[smem_warp_offset];
  half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
  float *smem_out = reinterpret_cast<float *>(smem_temp);
  // Global memory pointers for the current sample
  // Input
  // uint gmem_input_sample_offset = sample_id * sample_size;
  // const half *gmem_input = &input[gmem_input_sample_offset];
  uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
  uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
  const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
  const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
  // Interaction Gradient
  // const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
  // half *gmem_grad = &grad[gmem_grad_sample_offset];
  half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
  half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
  // Bottom MLP gradient
  // half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
  // Upstream gradient vector: [num_cols MLP grads | padded interaction grads]
  uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
  const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
  // Upstream gradient vector for interactions
  const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
  // upstream grad -> shared memory (place in input section temporarily):
  // bulk copy 8 halves at a time as float4, then a scalar loop for the tail.
#pragma unroll
  for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += THREADS_IN_WARP) {
    ((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
  }
  uint offset = (interaction_ugrad_size >> 3) << 3;
  for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
    smem_in[idx] = gmem_ugrad_interactions[idx];
  }
  __syncwarp();
  // Form the 2D ugrad matrix: the flat grad stores pair (i, j), j < i, at
  // index i*(i-1)/2 + j; expand into a full symmetric matrix (zero diagonal)
  // and zero the padding rows.
  if (lane_id < num_rows_after_padding) {
    uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
    uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
    for (uint row = 0; row < num_rows; row++) {
      half ugrad_val = __float2half(0.0f);
      if (row < lane_id && lane_id < num_rows) {
        ugrad_val = smem_in[ugrad_flat_index + row];
        smem_temp[ugrad_offset_1 + row] = ugrad_val;
      }
      if (row <= lane_id && lane_id < num_rows_after_padding) {
        smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
      }
    }
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
    }
  }
  __syncwarp();
  // Input -> Shared Memory (row 0 from the bottom-MLP tensor, the rest from
  // the embedding tensor), 4 halves per lane via float2.
  if (lane_id < (num_cols >> 2)) {
    for (uint row = 0; row < num_rows; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      // const half *gmem_row_ptr = &gmem_input[row * num_cols];
      const half *gmem_row_ptr =
          (row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
      ((float2 *)smem_row_ptr)[lane_id] = ((float2 *)gmem_row_ptr)[lane_id];
    }
  }
  // Zero the padded column range of the real rows.
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (uint row = 0; row < num_rows; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      smem_row_ptr[idx] = __float2half(0);
    }
  }
  // Zero-fill the padding rows (4 halves per lane) so wmma reads defined data.
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      ((half4 *)smem_row_ptr)[lane_id] = zeros;
    }
  }
  __syncwarp();
  // a = tiles of the 2D ugrad matrix (loaded once); b = input tiles;
  // acc = ugrad2D * input, one TILE_DIM-wide column slab per step.
  nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half,
                         nvcuda::wmma::row_major>
      a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
  for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
    for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
      const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
      nvcuda::wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
    }
  }
  nvcuda::wmma::fragment<nvcuda::wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float>
      acc[ROW_TILES_PER_STEP];
  nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half,
                         nvcuda::wmma::row_major>
      b[ROW_TILES_PER_STEP];
  for (int col_step = 0; col_step < num_col_steps; col_step++) {
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
      nvcuda::wmma::fill_fragment(acc[i], 0);
      nvcuda::wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
        nvcuda::wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
      }
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
      nvcuda::wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, nvcuda::wmma::mem_row_major);
    }
    __syncwarp();
    // Write-back of this 16-column slab. Bottom-MLP row: lanes 0..7 each
    // store one __half2 (16 halves total), fusing the add of the bottom-MLP
    // part of the upstream grad. Embedding rows: one scalar per lane.
    uint gmem_grad_col_base = (col_step << TILE_DIM_LOG_2);
    uint gmem_grad_col = gmem_grad_col_base + lane_id;
    if (gmem_grad_col < num_cols) {
      if (lane_id < 8) {
        ((__half2 *)(gmem_bottom_mlp_grad + gmem_grad_col_base))[lane_id] =
            __hadd2(__float22half2_rn(((float2 *)smem_out)[lane_id]),
                    ((__half2 *)(gmem_ugrad + gmem_grad_col_base))[lane_id]);
      }
      for (uint i = 0; i < num_rows - 1; i++) {
        half val = __float2half(smem_out[((i + 1) << TILE_DIM_LOG_2) + lane_id]);
        gmem_emb_grad[i * num_cols + gmem_grad_col] = val;
      }
    }
  }
#else
#warning "dotBasedInteractBwdKernel is not supported for SM < 70 (or __CUDA_ARCH__ < 700)"
#endif
}
// Host-side launcher for the FP16 fused dot-interaction forward pass.
// Computes kernel geometry and shared-memory sizing, then dispatches either
// the vectorized kernel (when rows and output are float4-aligned) or the
// scalar fallback. One warp handles one sample; grid covers the batch.
//   bottom_mlp_input : [batch_size, num_cols] half
//   emb_input        : [batch_size, num_rows - 1, num_cols] half
//   output           : [batch_size, num_cols + num_rows*(num_rows-1)/2 + 1]
inline void dotBasedInteractFwd(const void *bottom_mlp_input, const void *emb_input, void *output,
                                uint batch_size, uint num_rows, uint num_cols,
                                cudaStream_t stream) {
  // Fixed geometry — must stay in sync with the kernel template parameters.
  const uint warp_size = 32;
  const uint warp_size_log2 = Log2<warp_size>::value;
  const uint tile_dim = 16;
  const uint tile_dim_log2 = Log2<tile_dim>::value;
  const uint warps_per_block = 4;
  const uint block_size = warps_per_block * 32;
  const uint padding_size = 1;
  const uint row_tiles_per_step = 2;
  const uint col_tiles_per_step = 1;

  // Tile counts and padded matrix extents.
  const uint num_row_tiles = (num_rows + tile_dim - 1) >> tile_dim_log2;
  const uint num_col_tiles = (num_cols + tile_dim - 1) >> tile_dim_log2;
  const uint num_rows_after_padding = tile_dim << 1;
  const uint num_cols_after_padding = num_col_tiles << tile_dim_log2;
  const uint num_row_steps = num_row_tiles / row_tiles_per_step;
  const uint num_col_steps = num_col_tiles / col_tiles_per_step;

  // Per-warp shared-memory sizing; the skew padding avoids bank conflicts.
  const uint k_blocks = 8;
  const uint m_blocks = 2;
  const uint skew_half = ((k_blocks % 2) == 0) ? 8 : 0;
  const uint smem_stride = (k_blocks * 16 + skew_half);
  // multiple of 2 to guarantee 256-bit alignment for start of the row, at
  // least 16 to safely load a tile
  const uint smem_rows_per_warp = m_blocks << 4;
  const uint smem_elems_per_warp_mat = smem_rows_per_warp * smem_stride;
  const uint skew_half_acc = ((m_blocks % 2) == 0) ? 8 : 0;
  const uint smem_stride_acc = (m_blocks * 16 + skew_half_acc);
  const uint smem_elems_per_warp_acc = m_blocks * 16 * smem_stride_acc * 2;  // output in FP32
  // The matrix staging and the fp32 accumulator staging share the same space.
  const uint smem_elems_per_warp = (smem_elems_per_warp_mat > smem_elems_per_warp_acc)
                                       ? smem_elems_per_warp_mat
                                       : smem_elems_per_warp_acc;

  // Per-sample output: bottom MLP | upper-triangular interactions | 1 pad.
  const uint output_size = num_cols + (num_rows * (num_rows - 1) >> 1) + padding_size;

  const uint num_blocks = (batch_size + warps_per_block - 1) / warps_per_block;
  const uint smem_bytes = warps_per_block * smem_elems_per_warp * sizeof(__half);

  // Vectorized kernel requires rows and output to be 8-half (float4) aligned.
  const bool aligned = !((num_cols & 7) || (output_size & 7));
  if (aligned) {
    dotBasedInteractFwdKernel<warps_per_block, block_size, m_blocks, k_blocks, smem_stride,
                              smem_stride_acc, warp_size, warp_size_log2, tile_dim, tile_dim_log2>
        <<<num_blocks, block_size, smem_bytes, stream>>>(
            (const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output,
            batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding,
            smem_elems_per_warp, smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
  } else {
    dotBasedInteractFwdKernelNonAligned<warps_per_block, block_size, m_blocks, k_blocks,
                                        smem_stride, smem_stride_acc, warp_size, warp_size_log2,
                                        tile_dim, tile_dim_log2>
        <<<num_blocks, block_size, smem_bytes, stream>>>(
            (const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output,
            batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding,
            smem_elems_per_warp, smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
  }
}
// Host-side launcher for the FP16 fused dot-interaction backward pass.
// Splits the upstream gradient into its bottom-MLP part (accumulated into
// bottom_mlp_grad) and its pairwise-interaction part (expanded into emb_grad).
// Chooses the vectorized kernel when all accesses are float4-aligned,
// otherwise the scalar fallback.
inline void dotBasedInteractBwd(void *upstream_grad, void *bottom_mlp_grad, void *emb_grad,
                                uint batch_size, uint num_rows, uint num_cols,
                                cudaStream_t stream) {
  // Fixed geometry — must stay in sync with the kernel template parameters.
  const uint warp_size = 32;
  const uint warp_size_log2 = Log2<warp_size>::value;
  const uint tile_dim = 16;
  const uint tile_dim_log2 = Log2<tile_dim>::value;
  const uint mem_skew_size = 8;
  const uint padding_size = 1;
  const uint warps_per_block = 4;
  const uint warps_per_block_log2 = Log2<warps_per_block>::value;
  const uint num_threads = warps_per_block * warp_size;
  const uint max_row_tiles_per_step = 2;
  const uint col_tiles_per_step = 1;

  // Small feature counts fit into a single row tile per step.
  const uint row_tiles_per_step = num_rows > tile_dim ? max_row_tiles_per_step : 1;

  // Tile counts and padded matrix extents.
  const uint num_row_tiles = (num_rows + tile_dim - 1) >> tile_dim_log2;
  const uint num_col_tiles = (num_cols + tile_dim - 1) >> tile_dim_log2;
  const uint num_rows_after_padding = tile_dim << 1;
  const uint num_cols_after_padding = num_col_tiles << tile_dim_log2;

  // Expanded 2D upstream-grad matrix (skewed stride avoids bank conflicts).
  const uint interaction_ugrad_2D_stride = num_rows_after_padding + mem_skew_size;
  const uint interaction_ugrad_2D_size_elems =
      num_rows_after_padding * interaction_ugrad_2D_stride;
  const uint interaction_ugrad_2D_size_bytes = interaction_ugrad_2D_size_elems * sizeof(half);

  // Flat upstream interaction gradient: one value per feature pair (+pad).
  const uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1;
  const uint interaction_ugrad_size_with_padding = interaction_ugrad_size + padding_size;

  // Zero-padded input matrix staged in shared memory.
  const uint input_stride = num_cols_after_padding + mem_skew_size;
  const uint input_size_elems = num_rows_after_padding * input_stride;
  const uint input_size_bytes = input_size_elems * sizeof(half);

  // sample size
  const uint sample_size = num_rows * num_cols;

  // fp32 staging tiles for the wmma accumulators.
  const uint output_size_elems =
      tile_dim * tile_dim * max_row_tiles_per_step * col_tiles_per_step;
  const uint output_size_bytes = output_size_elems * sizeof(float);

  // The 2D ugrad matrix and the fp32 output tiles reuse one staging area.
  const uint staging_area_size_bytes = output_size_bytes > interaction_ugrad_2D_size_bytes
                                           ? output_size_bytes
                                           : interaction_ugrad_2D_size_bytes;

  // Shared memory size and launch configuration.
  const uint shared_mem_per_warp_size_byte = input_size_bytes + staging_area_size_bytes;
  const uint shared_mem_size_bytes = warps_per_block * shared_mem_per_warp_size_byte;
  const uint num_blocks = (batch_size + warps_per_block - 1) >> warps_per_block_log2;
  const uint num_row_steps = num_row_tiles / row_tiles_per_step;
  const uint num_col_steps = num_col_tiles / col_tiles_per_step;

  // Vectorized kernel needs both regions to be 8-half (float4) aligned.
  const bool aligned = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7));
  if (aligned) {
    dotBasedInteractBwdKernel<warps_per_block, num_threads, max_row_tiles_per_step,
                              col_tiles_per_step, warp_size, warp_size_log2, tile_dim,
                              tile_dim_log2>
        <<<num_blocks, num_threads, shared_mem_size_bytes, stream>>>(
            (const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
            num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
            interaction_ugrad_size, interaction_ugrad_size_with_padding,
            interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
            input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
            shared_mem_per_warp_size_byte);
  } else {
    dotBasedInteractBwdKernelNonAligned<warps_per_block, num_threads, max_row_tiles_per_step,
                                        col_tiles_per_step, warp_size, warp_size_log2, tile_dim,
                                        tile_dim_log2>
        <<<num_blocks, num_threads, shared_mem_size_bytes, stream>>>(
            (const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
            num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
            interaction_ugrad_size, interaction_ugrad_size_with_padding,
            interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
            input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
            shared_mem_per_warp_size_byte);
  }
}
// Packs/unpacks the bottom-MLP output and the embeddings into one contiguous
// buffer of shape [h, out_w], where out_w = n_ins * in_w and n_ins = 1+n_emb.
// blockIdx.x selects the input slot (0 = MLP, 1..n_emb = one embedding);
// blockIdx.y strides over rows; threads stride over the in_w columns.
// forward == true : out <- inputs (concat)
// forward == false: inputs <- out (split); the MLP slot accumulates instead
//                   of overwriting, matching the bprop contract of the caller.
template <typename T>
__global__ void concat_kernel(bool forward, T *out, T *in_mlp, T *in_emb, const int h,
                              const int out_w, const int in_w, const int n_emb) {
  const int n_ins = 1 + n_emb;
  const int slot = blockIdx.x;
  if (slot >= n_ins) {
    return;
  }
  const bool is_mlp = (slot == 0);
  // MLP rows are in_w apart; embedding rows are n_emb * in_w apart
  // (emb layout is [h, n_emb, in_w], and `src` is pre-offset to this slot).
  T *src = is_mlp ? in_mlp : in_emb + (slot - 1) * in_w;
  const int src_row_stride = is_mlp ? in_w : in_w * n_emb;
  for (int row = blockIdx.y; row < h; row += gridDim.y) {
    T *src_row = src + row * src_row_stride;
    T *out_row = out + row * out_w + slot * in_w;
    for (int col = threadIdx.x; col < in_w; col += blockDim.x) {
      if (forward) {
        out_row[col] = src_row[col];
      } else if (is_mlp) {
        src_row[col] = src_row[col] + out_row[col];
      } else {
        src_row[col] = out_row[col];
      }
    }
  }
}
// Gathers the strict upper triangle of each per-sample interaction matrix
// `mat` (h matrices of n_ins x n_ins) and concatenates it after the
// bottom-MLP output `in0` ([h, w]) into `out`.
// Per-sample output layout: [w MLP values | n_ins*(n_ins-1)/2 interactions |
// 1 padding element] — the padding slot is left untouched here.
// Shared memory: (n_ins*(n_ins+1)/2 - n_ins) elements of T.
// NOTE(review): `extern __shared__` with a template-dependent type requires
// all instantiations in this TU to agree on the element type — confirm only
// one T is instantiated (the __half path bypasses these kernels).
template <typename T>
__global__ void gather_concat_fprop_kernel(T *out, const T *in0, const T *mat, const int h,
                                           const int n_ins, const int w) {
  extern __shared__ T s_buf[];
  // Each block handles a strided subset of the h samples.
  for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
    int g_in_idx_base = bid * n_ins * n_ins;
    // Stage the strict upper triangle (col > row) in shared memory,
    // flattened column-major over the triangle: s_idx = col*(col-1)/2 + row.
    for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
      for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
        if (col > row) {
          int idx_in_blk = row * n_ins + col;
          int g_in_idx = g_in_idx_base + idx_in_blk;
          int s_idx = (col * (col - 1) / 2) + row;
          s_buf[s_idx] = mat[g_in_idx];
        }
      }
    }
    __syncthreads();
    // Emit [in0 row | staged triangle]; out_len includes the +1 pad slot,
    // which is excluded by the `tid < out_len - 1` bound.
    int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
    int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
    int g_out_idx_base = bid * out_len;
    for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
      int g_out_idx = g_out_idx_base + tid;
      T value = (tid < w) ? in0[bid * w + tid] : s_buf[tid - w];
      out[g_out_idx] = value;
    }
    // Barrier before the next sample reuses s_buf.
    __syncthreads();
  }
}
// Computes dst = src + src^T for each of the h (n_ins x n_ins) matrices.
// A block stages its tile of src in shared memory; each thread then adds its
// own element and the mirrored element read from the same tile. The shared
// staging plus barriers also make in-place operation (dst == src, as used by
// bprop) safe within a tile.
// Requires square thread blocks (blockDim.x == blockDim.y) and shared memory
// of blockDim.x * blockDim.y elements of T.
// NOTE(review): the transposed element is taken from the *same* block's tile
// (s_buf[sid_t]), which is only the true mirror when the whole matrix fits in
// one block in x/y, i.e. n_ins <= blockDim.x (32 in the bprop launch). For
// larger n_ins the mirrored element lives in a different block and this would
// read the wrong value — confirm callers keep n_ins <= 32.
template <typename T>
__global__ void transpose_and_add(const T *src, T *dst, const int h, const int n_ins) {
  extern __shared__ T s_buf[];
  // blockIdx.z strides over the h sample matrices.
  for (int bid = blockIdx.z; bid < h; bid += gridDim.z) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int gid = bid * n_ins * n_ins + y * n_ins + x;
    int sid_n = threadIdx.y * blockDim.x + threadIdx.x;  // this thread's slot
    int sid_t = threadIdx.x * blockDim.y + threadIdx.y;  // mirrored slot
    if (x < n_ins && y < n_ins) {
      s_buf[sid_n] = src[gid];
    }
    __syncthreads();
    if (x < n_ins && y < n_ins) {
      dst[gid] = s_buf[sid_n] + s_buf[sid_t];
    }
    // Barrier before the next sample overwrites s_buf.
    __syncthreads();
  }
}
// Backward of gather_concat_fprop_kernel: splits the output gradient `out`
// ([h, w + n_ins*(n_ins-1)/2 + 1]) back into the bottom-MLP gradient `in0`
// ([h, w]) and the strict upper triangle of the interaction-matrix gradient
// `mat` (h matrices of n_ins x n_ins); the diagonal and lower triangle of
// `mat` are zeroed (transpose_and_add symmetrizes it afterwards).
// Shared memory: (n_ins*(n_ins+1)/2 - n_ins) elements of T.
template <typename T>
__global__ void gather_concat_bprop_kernel(const T *out, T *in0, T *mat, const int h,
                                           const int n_ins, const int w) {
  extern __shared__ T s_buf[];
  // Each block handles a strided subset of the h samples.
  for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
    int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
    // out_len includes the +1 pad slot; `tid < out_len - 1` skips it.
    int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
    int g_out_idx_base = bid * out_len;
    for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
      int g_out_idx = g_out_idx_base + tid;
      T val = out[g_out_idx];
      if (tid < w) {
        in0[bid * w + tid] = val;          // MLP portion goes straight out
      } else {
        s_buf[tid - w] = val;              // triangle portion staged in smem
      }
    }
    __syncthreads();
    // Scatter the staged triangle into the full matrix; everything at or
    // below the diagonal is zeroed. s_idx mirrors the fprop flattening.
    int g_in_idx_base = bid * n_ins * n_ins;
    for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
      for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
        int idx_in_blk = row * n_ins + col;
        int g_in_idx = g_in_idx_base + idx_in_blk;
        int s_idx = (col * (col - 1) / 2) + row;
        mat[g_in_idx] = (col > row) ? s_buf[s_idx] : T(0);
      }
    }
    // Barrier before the next sample reuses s_buf.
    __syncthreads();
  }
}
} // anonymous namespace
// Validates input shapes and reserves the output (and, for the fp32 path,
// intermediate) device buffers.
//   in_bottom_mlp_tensor : [batch, width]
//   in_embeddings        : [batch, n_emb, width]
//   out_tensor (reserved): [batch, width + n_ins*(n_ins-1)/2 + 1],
//                          n_ins = 1 + n_emb; the +1 is a padding element.
// The three internal tensors (fp32 path only; the __half path uses the fused
// kernels and needs no intermediates) are: the concatenated input
// [batch, width + n_emb*width], the interaction matrix [batch, n_ins*n_ins],
// and a scratch copy of the concat buffer used during bprop.
template <typename T>
InteractionLayer<T>::InteractionLayer(
    const Tensor2<T> &in_bottom_mlp_tensor, const Tensor2<T> &in_embeddings, Tensor2<T> &out_tensor,
    const std::shared_ptr<GeneralBuffer2<CudaAllocator>> &blobs_buff,
    const std::shared_ptr<GPUResource> &gpu_resource, bool use_mixed_precision,
    bool enable_tf32_compute)
    : Layer(gpu_resource),
      use_mixed_precision_(use_mixed_precision),
      enable_tf32_compute_(enable_tf32_compute) {
  try {
    auto first_in_dims = in_bottom_mlp_tensor.get_dimensions();
    auto second_in_dims = in_embeddings.get_dimensions();
    // Shape validation: 2D MLP input, 3D embeddings, matching batch and width.
    if (first_in_dims.size() != 2) {
      CK_THROW_(Error_t::WrongInput, "Input Bottom MLP must be a 2D tensor");
    }
    if (second_in_dims.size() != 3) {
      CK_THROW_(Error_t::WrongInput, "Input Embeddings must be a 3D tensor");
    }
    if (first_in_dims[0] != second_in_dims[0]) {
      CK_THROW_(Error_t::WrongInput, "the input tensors' batch sizes must be the same");
    }
    if (first_in_dims[1] != second_in_dims[2]) {
      CK_THROW_(Error_t::WrongInput, "the input tensors' widths must be the same");
    }
    size_t n_ins = 1 + second_in_dims[1];
    // Intermediates are only needed by the fp32/TF32 path.
    if (std::is_same<T, __half>::value == false) {
      size_t concat_dims_width = first_in_dims[1] + second_in_dims[1] * second_in_dims[2];
      std::vector<size_t> concat_dims = {first_in_dims[0], concat_dims_width};
      // internal_tensors_[0]: concatenated [MLP | embeddings] input
      {
        Tensor2<T> tensor;
        blobs_buff->reserve(concat_dims, &tensor);
        internal_tensors_.push_back(tensor);
      }
      // internal_tensors_[1]: per-sample n_ins x n_ins interaction matrix
      {
        std::vector<size_t> mat_dims = {first_in_dims[0], n_ins * n_ins};
        Tensor2<T> tensor;
        blobs_buff->reserve(mat_dims, &tensor);
        internal_tensors_.push_back(tensor);
      }
      // internal_tensors_[2]: scratch concat buffer for bprop
      {
        Tensor2<T> tensor;
        blobs_buff->reserve(concat_dims, &tensor);
        internal_tensors_.push_back(tensor);
      }
    }
    // concat_len == n_ins*(n_ins-1)/2: one value per unordered feature pair.
    int concat_len = n_ins * (n_ins + 1) / 2 - n_ins;
    std::vector<size_t> out_dims = {first_in_dims[0], first_in_dims[1] + concat_len + 1};
    blobs_buff->reserve(out_dims, &out_tensor);
    in_tensors_.push_back(in_bottom_mlp_tensor);
    in_tensors_.push_back(in_embeddings);
    out_tensors_.push_back(out_tensor);
  } catch (const std::runtime_error &rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Trivial destructor; defaulted out of line so the explicit instantiations
// below emit a single definition.
template <typename T>
InteractionLayer<T>::~InteractionLayer() = default;
// fp32/TF32 forward pass, in three phases:
//  0) concat_kernel packs [bottom MLP | embeddings] into internal_tensors_[0]
//  1) a strided-batched GEMM computes the per-sample Gram matrix
//     mat = concat * concat^T  (n_ins x n_ins, symmetric)
//  2) gather_concat_fprop_kernel emits [MLP | upper-triangle(mat) | 1 pad]
// The __half specialization below replaces all of this with one fused kernel.
template <typename T>
void InteractionLayer<T>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  // phase 0: concat
  T *concat = internal_tensors_[0].get_ptr();
  T *in_mlp = get_in_tensors(is_train)[0].get_ptr();
  T *in_emb = get_in_tensors(is_train)[1].get_ptr();
  const int h = internal_tensors_[0].get_dimensions()[0];
  const int out_w = internal_tensors_[0].get_dimensions()[1];
  const int in_w = get_in_tensors(is_train)[0].get_dimensions()[1];
  const int n_emb = get_in_tensors(is_train)[1].get_dimensions()[1];
  const int n_ins = 1 + n_emb;
  // One block column per input slot; grid.y spreads rows over the SMs.
  dim3 grid0(n_ins, get_gpu().get_sm_count(), 1);
  dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
  concat_kernel<<<grid0, block0, 0, get_gpu().get_stream()>>>(true, concat, in_mlp, in_emb, h,
                                                              out_w, in_w, n_emb);
  // phase 1: matmul
  const int batch_count = h;
  T *mat = internal_tensors_[1].get_ptr();
  const int m = n_ins;
  const int n = n_ins;
  const int k = in_w;
  float alpha = 1.0f;
  float beta = 0.0f;
  long long int stride_a = static_cast<long long int>(n) * k;
  long long int stride_b = static_cast<long long int>(k) * m;
  long long int stride_c = static_cast<long long int>(n) * m;
  cudaDataType_t a_type = CUDA_R_32F;
  cudaDataType_t b_type = CUDA_R_32F;
  cudaDataType_t c_type = CUDA_R_32F;
  cublasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  cublasGemmAlgo_t algo =
      use_mixed_precision_ ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : CUBLAS_GEMM_DEFAULT;
  // cuBLAS is column-major: the row-major [n_ins, in_w] sample reads as its
  // transpose, so OP_T/OP_N with lda = ldb = k yields mat = concat * concat^T.
  CK_CUBLAS_THROW_(
      cublasGemmStridedBatchedEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, m, n, k,
                                 &alpha, concat, a_type, k, stride_a, concat, b_type, k, stride_b,
                                 &beta, mat, c_type, n, stride_c, batch_count, compute_type, algo));
  // phase 2: gather & concat
  T *in0 = get_in_tensors(is_train)[0].get_ptr();
  T *gather = out_tensors_[0].get_ptr();
  dim3 grid1(get_gpu().get_sm_count() * 8, 1, 1);
  dim3 block1(16, 16, 1);
  // Shared memory holds the strict upper triangle of one sample's matrix.
  size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
  gather_concat_fprop_kernel<<<grid1, block1, smem_size, get_gpu().get_stream()>>>(gather, in0, mat,
                                                                                   h, n_ins, in_w);
#ifndef NDEBUG
  CK_CUDA_THROW_(cudaDeviceSynchronize());
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
// FP16 forward: one fused dot-interaction kernel, no intermediate tensors.
// Inputs: bottom-MLP output [batch, width], embeddings [batch, n_emb, width].
template <>
void InteractionLayer<__half>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  __half *bottom_mlp_out = get_in_tensors(is_train)[0].get_ptr();
  __half *embeddings = get_in_tensors(is_train)[1].get_ptr();
  __half *interaction_out = out_tensors_[0].get_ptr();
  const auto &mlp_dims = get_in_tensors(is_train)[0].get_dimensions();
  const int batch = mlp_dims[0];
  const int width = mlp_dims[1];
  const int num_emb = get_in_tensors(is_train)[1].get_dimensions()[1];
  // num_rows = 1 + num_emb: the MLP output counts as one extra feature row.
  dotBasedInteractFwd(bottom_mlp_out, embeddings, interaction_out, batch, 1 + num_emb, width,
                      get_gpu().get_stream());
#ifndef NDEBUG
  CK_CUDA_THROW_(cudaDeviceSynchronize());
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
// fp32/TF32 backward pass, mirroring fprop in reverse:
//  0) gather_concat_bprop_kernel splits the output grad into the MLP part
//     (written to in0) and the strict upper triangle of mat
//  1) mat <- mat + mat^T (the Gram matrix is symmetric, so both triangles
//     contribute), then a batched GEMM computes concat_tmp = mat * concat
//  2) concat_kernel in backward mode scatters concat_tmp into the two input
//     gradients, accumulating the bottom-MLP part
// The __half specialization below replaces all of this with one fused kernel.
template <typename T>
void InteractionLayer<T>::bprop() {
  CudaDeviceContext context(get_device_id());
  // phase 0:
  T *gather = out_tensors_[0].get_ptr();
  T *in0 = get_in_tensors(true)[0].get_ptr();
  T *mat = internal_tensors_[1].get_ptr();
  const int h = internal_tensors_[0].get_dimensions()[0];
  const int n_ins = 1 + get_in_tensors(true)[1].get_dimensions()[1];
  const int in_w = get_in_tensors(true)[0].get_dimensions()[1];
  dim3 grid1(get_gpu().get_sm_count() * 8, 1, 1);
  dim3 block1(16, 16, 1);
  size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
  gather_concat_bprop_kernel<<<grid1, block1, smem_size, get_gpu().get_stream()>>>(gather, in0, mat,
                                                                                   h, n_ins, in_w);
  // phase 1:
  const int batch_count = h;
  T *concat = internal_tensors_[0].get_ptr();
  T *concat_tmp = internal_tensors_[2].get_ptr();
  const int m = n_ins;
  const int n = in_w;
  const int k = n_ins;
  // NOTE(review): alpha/beta have type T; for this template T is float (the
  // __half specialization overrides bprop), matching CUDA_R_32F below.
  T alpha = 1.0f;
  T beta = 0.0f;
  long long int stride_a = static_cast<long long int>(n) * k;
  long long int stride_b = static_cast<long long int>(k) * m;
  long long int stride_c = static_cast<long long int>(n) * m;
  cudaDataType_t a_type = CUDA_R_32F;
  cudaDataType_t b_type = CUDA_R_32F;
  cudaDataType_t c_type = CUDA_R_32F;
  cublasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  cublasGemmAlgo_t algo =
      use_mixed_precision_ ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : CUBLAS_GEMM_DEFAULT;
  // mat = mat + T(mat) — symmetrize the interaction-matrix gradient in place.
  {
    dim3 block(32, 32, 1);
    dim3 grid((n_ins + block.x - 1) / block.x, (n_ins + block.y - 1) / block.y, h);
    size_t smem_size = sizeof(T) * block.x * block.y;
    transpose_and_add<<<grid, block, smem_size, get_gpu().get_stream()>>>(mat, mat, h, n_ins);
  }
  // Column-major view of the row-major buffers: this computes, per sample,
  // concat_tmp = mat * concat (row-major [n_ins, in_w]).
  CK_CUBLAS_THROW_(cublasGemmStridedBatchedEx(
      get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, concat, a_type, n,
      stride_a, mat, b_type, k, stride_b, &beta, concat_tmp, c_type, n, stride_c, batch_count,
      compute_type, algo));
  // phase 2:
  T *in_mlp = get_in_tensors(true)[0].get_ptr();
  T *in_emb = get_in_tensors(true)[1].get_ptr();
  const int out_w = internal_tensors_[0].get_dimensions()[1];
  const int n_emb = get_in_tensors(true)[1].get_dimensions()[1];
  dim3 grid0(n_ins, get_gpu().get_sm_count(), 1);
  dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
  concat_kernel<<<grid0, block0, 0, get_gpu().get_stream()>>>(false, concat_tmp, in_mlp, in_emb, h,
                                                              out_w, in_w, n_emb);
#ifndef NDEBUG
  CK_CUDA_THROW_(cudaDeviceSynchronize());
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
// FP16 backward: one fused kernel writes both input gradients in place
// (the input tensors double as gradient buffers in this layer).
template <>
void InteractionLayer<__half>::bprop() {
  CudaDeviceContext context(get_device_id());
  __half *top_grad = out_tensors_[0].get_ptr();
  __half *bottom_mlp_grad = get_in_tensors(true)[0].get_ptr();
  __half *embedding_grad = get_in_tensors(true)[1].get_ptr();
  const auto &mlp_dims = get_in_tensors(true)[0].get_dimensions();
  const int batch = mlp_dims[0];
  const int width = mlp_dims[1];
  const int num_emb = get_in_tensors(true)[1].get_dimensions()[1];
  // num_rows = 1 + num_emb, matching the forward pass.
  dotBasedInteractBwd(top_grad, bottom_mlp_grad, embedding_grad, batch, 1 + num_emb, width,
                      get_gpu().get_stream());
#ifndef NDEBUG
  CK_CUDA_THROW_(cudaDeviceSynchronize());
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class InteractionLayer<float>;
template class InteractionLayer<__half>;
} // namespace HugeCTR
|
0e9fb38ac1adf513f32d7e8ab7393597fcadda6e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define N 128
#define base 0
#define THREADS_PER_BLOCK 512
__global__ void count_characters(char *buffer, int *freq, long file_size, int total_threads);
void display_count(int *freq, int n);
/*
* Main
*/
/*
 * Main: reads a file into host memory, counts character frequencies on the
 * GPU, and reports kernel / transfer timings.
 */
int main(int argc, char *argv[]){
    int blocks;
    int num_threads;
    float total_time, comp_time;
    hipEvent_t total_start, total_stop, comp_start, comp_stop;
    hipEventCreate(&total_start);
    hipEventCreate(&total_stop);
    hipEventCreate(&comp_start);
    hipEventCreate(&comp_stop);

    FILE *pFile;
    long file_size;
    char * buffer;
    char * filename;
    size_t result;
    int * freq;

    if (argc != 2) {
        printf ("Usage : %s <file_name>\n", argv[0]);
        return 1;
    }
    filename = argv[1];
    pFile = fopen ( filename , "rb" );
    if (pFile==NULL) {printf ("File error\n"); return 2;}

    /* obtain file size */
    fseek (pFile , 0 , SEEK_END);
    file_size = ftell (pFile);
    rewind (pFile);
    printf("file size is %ld\n", file_size);

    /* allocate memory to contain the file */
    buffer = (char*) malloc (sizeof(char)*file_size);
    if (buffer == NULL) {printf ("Memory error\n"); return 3;}

    /* copy the file into the buffer */
    result = fread (buffer,1,file_size,pFile);
    if (result != file_size) {printf ("Reading error\n"); return 4;}
    fclose (pFile);  /* BUGFIX: the file handle was never closed */

    freq = (int*) malloc(sizeof(int)*N);
    if (freq == NULL) {printf ("Memory error\n"); return 5;}

    /* Memory allocation on device */
    char *buff_dev;
    int *freq_dev;
    hipMalloc((void **)&buff_dev, file_size*sizeof(char));
    hipMalloc((void **)&freq_dev, N*sizeof(int));
    /* BUGFIX: zero the whole int array (N*sizeof(int) bytes); the old call
     * zeroed only N bytes, leaving 3/4 of the counters uninitialized. */
    hipMemset(freq_dev, 0, N*sizeof(int));

    hipEventRecord(total_start);
    /* Copy buffer from host memory to device memory */
    hipMemcpy(buff_dev, buffer, sizeof(char)*file_size, hipMemcpyHostToDevice);

    /* Create sufficient blocks and the matching total thread count */
    blocks = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    num_threads = blocks * THREADS_PER_BLOCK;

    hipEventRecord(comp_start);
    /*
     * BUGFIX: the launch geometry must match the stride handed to the kernel.
     * The old launch used <<<blocks*2, N>>> (blocks*2*N threads) while the
     * kernel strided by num_threads = blocks*THREADS_PER_BLOCK, so part of
     * the buffer was never counted.
     */
    hipLaunchKernelGGL(( count_characters), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, buff_dev, freq_dev, file_size, num_threads);
    hipEventRecord(comp_stop);
    hipEventSynchronize(comp_stop);
    hipEventElapsedTime(&comp_time, comp_start, comp_stop);

    /* Copy the result from device memory back to host memory */
    hipMemcpy(freq, freq_dev, N*sizeof(int), hipMemcpyDeviceToHost);
    hipEventRecord(total_stop);
    hipEventSynchronize(total_stop);
    hipEventElapsedTime(&total_time, total_start, total_stop);

    /* Free memory on device */
    hipFree(buff_dev);
    hipFree(freq_dev);
    hipEventDestroy(comp_start);
    hipEventDestroy(comp_stop);
    hipEventDestroy(total_start);
    hipEventDestroy(total_stop);

    /* Display Results */
    display_count(freq, N);

    /* GPU timing */
    printf("N: %d, blocks: %d, total_threads: %d\n", N, blocks, THREADS_PER_BLOCK*blocks);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);

    /* BUGFIX: release the leaked host buffers */
    free(buffer);
    free(freq);
    return 0;
}
/*
 * Function: count_characters
 * --------------------
 * Grid-stride character histogram: each thread processes bytes
 * index, index+total_threads, ... and bumps the matching bin atomically.
 *
 * buffer: pointer to char array that contains the txt file
 * freq: pointer to int array that will contain the frequency of each character
 * file_size: the size of the file (long number)
 * total_threads: total number of launched threads (must match the launch)
 *
 * BUGFIX: `char` may be signed, so bytes >= 128 produced a negative index
 * into freq[] (out-of-bounds atomicAdd). The byte is now widened to unsigned
 * and bounds-checked against N before being counted.
 */
__global__ void count_characters(char *buffer, int *freq, long file_size, int total_threads){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    long i;
    for (i = index; i < file_size; i += total_threads) {
        int c = (unsigned char)buffer[i] - base;
        if (c >= 0 && c < N)
            atomicAdd(&(freq[c]), 1);
    }
}
void display_count(int *freq, int n){
int j;
for (j=0; j<n; ++j)
(void) printf("%d = %d\n", j+base, freq[j]);
} | 0e9fb38ac1adf513f32d7e8ab7393597fcadda6e.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define N 128
#define base 0
#define THREADS_PER_BLOCK 512
__global__ void count_characters(char *buffer, int *freq, long file_size, int total_threads);
void display_count(int *freq, int n);
/*
* Main
*/
/*
 * Main: reads a file into host memory, counts character frequencies on the
 * GPU, and reports kernel / transfer timings.
 */
int main(int argc, char *argv[]){
    int blocks;
    int num_threads;
    float total_time, comp_time;
    cudaEvent_t total_start, total_stop, comp_start, comp_stop;
    cudaEventCreate(&total_start);
    cudaEventCreate(&total_stop);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_stop);

    FILE *pFile;
    long file_size;
    char * buffer;
    char * filename;
    size_t result;
    int * freq;

    if (argc != 2) {
        printf ("Usage : %s <file_name>\n", argv[0]);
        return 1;
    }
    filename = argv[1];
    pFile = fopen ( filename , "rb" );
    if (pFile==NULL) {printf ("File error\n"); return 2;}

    /* obtain file size */
    fseek (pFile , 0 , SEEK_END);
    file_size = ftell (pFile);
    rewind (pFile);
    printf("file size is %ld\n", file_size);

    /* allocate memory to contain the file */
    buffer = (char*) malloc (sizeof(char)*file_size);
    if (buffer == NULL) {printf ("Memory error\n"); return 3;}

    /* copy the file into the buffer */
    result = fread (buffer,1,file_size,pFile);
    if (result != file_size) {printf ("Reading error\n"); return 4;}
    fclose (pFile);  /* BUGFIX: the file handle was never closed */

    freq = (int*) malloc(sizeof(int)*N);
    if (freq == NULL) {printf ("Memory error\n"); return 5;}

    /* Memory allocation on device */
    char *buff_dev;
    int *freq_dev;
    cudaMalloc((void **)&buff_dev, file_size*sizeof(char));
    cudaMalloc((void **)&freq_dev, N*sizeof(int));
    /* BUGFIX: zero the whole int array (N*sizeof(int) bytes); the old call
     * zeroed only N bytes, leaving 3/4 of the counters uninitialized. */
    cudaMemset(freq_dev, 0, N*sizeof(int));

    cudaEventRecord(total_start);
    /* Copy buffer from host memory to device memory */
    cudaMemcpy(buff_dev, buffer, sizeof(char)*file_size, cudaMemcpyHostToDevice);

    /* Create sufficient blocks and the matching total thread count */
    blocks = (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    num_threads = blocks * THREADS_PER_BLOCK;

    cudaEventRecord(comp_start);
    /*
     * BUGFIX: the launch geometry must match the stride handed to the kernel.
     * The old launch used <<<blocks*2, N>>> (blocks*2*N threads) while the
     * kernel strided by num_threads = blocks*THREADS_PER_BLOCK, so part of
     * the buffer was never counted.
     */
    count_characters<<< blocks, THREADS_PER_BLOCK >>>(buff_dev, freq_dev, file_size, num_threads);
    cudaEventRecord(comp_stop);
    cudaEventSynchronize(comp_stop);
    cudaEventElapsedTime(&comp_time, comp_start, comp_stop);

    /* Copy the result from device memory back to host memory */
    cudaMemcpy(freq, freq_dev, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(total_stop);
    cudaEventSynchronize(total_stop);
    cudaEventElapsedTime(&total_time, total_start, total_stop);

    /* Free memory on device */
    cudaFree(buff_dev);
    cudaFree(freq_dev);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_stop);
    cudaEventDestroy(total_start);
    cudaEventDestroy(total_stop);

    /* Display Results */
    display_count(freq, N);

    /* GPU timing */
    printf("N: %d, blocks: %d, total_threads: %d\n", N, blocks, THREADS_PER_BLOCK*blocks);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);

    /* BUGFIX: release the leaked host buffers */
    free(buffer);
    free(freq);
    return 0;
}
/*
 * Function: count_characters
 * --------------------
 * Grid-stride character histogram: each thread processes bytes
 * index, index+total_threads, ... and bumps the matching bin atomically.
 *
 * buffer: pointer to char array that contains the txt file
 * freq: pointer to int array that will contain the frequency of each character
 * file_size: the size of the file (long number)
 * total_threads: total number of launched threads (must match the launch)
 *
 * BUGFIX: `char` may be signed, so bytes >= 128 produced a negative index
 * into freq[] (out-of-bounds atomicAdd). The byte is now widened to unsigned
 * and bounds-checked against N before being counted.
 */
__global__ void count_characters(char *buffer, int *freq, long file_size, int total_threads){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    long i;
    for (i = index; i < file_size; i += total_threads) {
        int c = (unsigned char)buffer[i] - base;
        if (c >= 0 && c < N)
            atomicAdd(&(freq[c]), 1);
    }
}
void display_count(int *freq, int n){
int j;
for (j=0; j<n; ++j)
(void) printf("%d = %d\n", j+base, freq[j]);
} |
1c2fe86da856033822583bfb226af64827a8bf3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <sys/stat.h>
#if USE_NVJPEG
#include <nvjpeg.h>
#else
#include <jpeglib.h>
#endif
#define WIDTH 1920
#define HEIGHT 1080
#include <display.h>
#include <pthread.h>
#include <math.h>
#include <inference.h>
#include <operators.h>
#include <asyncwork.h>
#include <jpegcodec.h>
#include <file.h>
#ifndef TITLE
#define TITLE "CUDA INFERENCE DEMO"
#endif
#define MIN_PROB 0.4
#ifndef USE_NVJPEG
#define USE_NVJPEG 0
#endif
//width and height defines come from inference.h at the moment
static uint8_t* imageBuffer = {0};
const char* const classNames[] = {
"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa","train", "tvmonitor"
};
// Debug pattern: fills the pitched RGBA surface with a horizontal red and a
// vertical green gradient (blue = 0, alpha = 1). One thread per pixel.
__global__
void f_test(float4* out, int pitch_out, int width, int height)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < width && row < height) {
        // pitch_out is in bytes; the original byte-based index is kept as-is
        out[row * pitch_out / sizeof(float4) + col] =
            make_float4((float) col / width, (float) row / height, 0, 1);
    }
}
// Converts the interleaved 8-bit RGB image (3 bytes per pixel) into the
// float4 display surface, scaling channels to 0..1 with alpha = 1.
__global__
void f_jpeg(float4* out, int pitch_out, uint8_t* rgb)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= WIDTH || y >= HEIGHT) return;

    const uint8_t* px = rgb + (y * WIDTH + x) * 3;  // start of this pixel's triple
    out[y * pitch_out / sizeof(float4) + x] = make_float4(
        px[0] / 255.0f,
        px[1] / 255.0f,
        px[2] / 255.0f,
        1);
}
#define SDIV (WIDTH / 300.0f)
// Resamples the WIDTHxHEIGHT interleaved RGB image down to the 300x300
// planar network input: nearest-neighbour scaling, channel order reversed
// (indices 2,1,0) and a per-channel constant subtracted. Out-of-image
// samples are written as 0.
__global__
void f_normalize(float* normalized, uint8_t* rgb)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= 300 || y >= 300) return;

    const int srcX = (int) (x * SDIV);
    const int srcY = (int) (y * SDIV);
    const bool inside = srcX < WIDTH && srcY < HEIGHT;

    const size_t src = srcY * WIDTH + srcX;  // source pixel index
    const size_t plane = 300 * 300;          // one channel plane in the output
    const size_t dst = y * 300 + x;

    normalized[dst + 0 * plane] = inside ? rgb[src * 3 + 2] - 104.0f : 0;
    normalized[dst + 1 * plane] = inside ? rgb[src * 3 + 1] - 117.0f : 0;
    normalized[dst + 2 * plane] = inside ? rgb[src * 3 + 0] - 123.0f : 0;
}
/*
 * Overlays a translucent, class-colored rectangle on the display surface for
 * every detection that passes the score / class-id filters.
 *
 * Launch: one thread per output pixel (2D grid over WIDTH x HEIGHT).
 *   out      - pitched float4 RGBA frame (pitch_out in bytes)
 *   boxes    - device array of detections, 7 floats per box; as used here:
 *              [1] = class id, [2] = score, [3..6] = normalized corners
 *              (element [0] is not read by this kernel)
 *   nboxes   - device pointer to the number of valid entries in `boxes`
 */
__global__
void f_bbox(float4* out, int pitch_out, float* boxes, uint32_t* nboxes)
{
    int x = (blockIdx.x * blockDim.x + threadIdx.x);
    int y = (blockIdx.y * blockDim.y + threadIdx.y);
    if (x >= WIDTH || y >= HEIGHT) return;
    int classification = 0;
    for (int i=0; i<*nboxes; i++)
    {
        float* box = boxes + i * 7;
        /* skip low-confidence or out-of-range scores */
        if (box[2] < MIN_PROB || box[2] > 1.0) continue;
        /* skip background (0) and unknown class ids */
        if (box[1] <= 0 || box[1] >= 21) continue;
        /* NOTE(review): all four corners, including the y-coordinates, are
         * scaled by WIDTH. The host-side printout does the same, but confirm
         * HEIGHT was not intended for miny/maxy. */
        float minx = box[3] * WIDTH;
        float miny = box[4] * WIDTH;
        float maxx = box[5] * WIDTH;
        float maxy = box[6] * WIDTH;
        if (x < minx || x > maxx || y < miny || y > maxy) continue;
        classification = box[1];
        float alpha = 0.4;
        /* map the class id onto a sine color wheel and alpha-blend it in */
        float4 color = classification ? make_float4(
            alpha/2 + alpha/2 * __sinf((classification/20.0f+0.00f) * 2 * M_PI),
            alpha/2 + alpha/2 * __sinf((classification/20.0f+0.33f) * 2 * M_PI),
            alpha/2 + alpha/2 * __sinf((classification/20.0f+0.66f) * 2 * M_PI),
            alpha) : make_float4(0,0,0,0);
        int idx = y * pitch_out/sizeof(float4) + x;
        out[idx] = out[idx] * (1-color.w) + color;
    }
    /* dead code kept from an earlier single-blend version */
#if 0
    float alpha = 0.4;
    float4 color = classification ? make_float4(
        alpha/2 + alpha/2 * __sinf((classification/20.0f+0.00f) * 2 * M_PI),
        alpha/2 + alpha/2 * __sinf((classification/20.0f+0.33f) * 2 * M_PI),
        alpha/2 + alpha/2 * __sinf((classification/20.0f+0.66f) * 2 * M_PI),
        alpha) : make_float4(0,0,0,0);
    int idx = y * pitch_out/sizeof(float4) + x;
    out[idx] = out[idx] * (1-color.w) + color;
#endif
}
// Maps a compute capability (major.minor) to the number of cores per SM.
// The 9999.9999 sentinel (device emulation) yields 1; unknown capabilities
// yield 0 so callers can detect them.
int smToCores(int major, int minor)
{
    const int sm = (major << 4) | minor;
    if (sm == ((9999 << 4) | 9999))
        return 1;
    switch (sm)
    {
        case 0x30: case 0x32: case 0x35: case 0x37:
            return 192;              // Kepler
        case 0x50: case 0x52: case 0x53:
            return 128;              // Maxwell
        case 0x60:
            return 64;               // Pascal GP100
        case 0x61: case 0x62:
            return 128;              // Pascal
        case 0x70: case 0x72: case 0x75:
            return 64;               // Volta / Turing
        case 0x80: case 0x86:
            return 64;               // Ampere
        default:
            return 0;                // unknown capability
    }
}
/*
 * Picks the GPU with the highest estimated throughput
 * (multiprocessors x cores-per-SM x clock rate) and makes it current.
 * Throws a C-string on any HIP error or when no usable device exists.
 */
void selectGPU()
{
    int rc;
    int maxId = -1;
    /* BUGFIX: the score below is a uint64_t in the billions; keeping the best
     * score in a uint16_t truncated it and could select the wrong device. */
    uint64_t maxScore = 0;
    int count = 0;
    hipDeviceProp_t prop;
    rc = hipGetDeviceCount(&count);
    if (hipSuccess != rc) throw "hipGetDeviceCount error";
    if (count == 0) throw "No suitable cuda device found";
    for (int id = 0; id < count; id++)
    {
        rc = hipGetDeviceProperties(&prop, id);
        if (hipSuccess != rc) throw "Unable to get device properties";
        /* devices in prohibited compute mode cannot be used */
        if (prop.computeMode == hipComputeModeProhibited)
        {
            printf("GPU %d: PROHIBITED\n", id);
            continue;
        }
        int sm_per_multiproc = smToCores(prop.major, prop.minor);
        printf("GPU %d: \"%s\"\n", id, prop.name);
        printf(" - Compute capability: %d.%d\n", prop.major, prop.minor);
        printf(" - Multiprocessors: %d\n", prop.multiProcessorCount);
        printf(" - SMs per processor: %d\n", sm_per_multiproc);
        printf(" - Clock rate: %d\n", prop.clockRate);
        uint64_t score =(uint64_t) prop.multiProcessorCount * sm_per_multiproc * prop.clockRate;
        if (score > maxScore)
        {
            maxId = id;
            maxScore = score;
        }
    }
    if (maxId < 0) throw "All cuda devices prohibited";
    rc = hipSetDevice(maxId);
    if (hipSuccess != rc) throw "Unable to set cuda device";
    rc = hipGetDeviceProperties(&prop, maxId);
    if (hipSuccess != rc) throw "Unable to get device properties";
    printf("\nSelected GPU %d: \"%s\" with compute capability %d.%d\n\n",
        maxId, prop.name, prop.major, prop.minor);
}
/*
 * Demo entry point: decodes a JPEG into device memory, runs SSD inference on
 * it in a loop, overlays the detected boxes on a GPU-backed display, and
 * prints the detections and per-iteration timing.
 */
int main(int /*argc*/, char** /*argv*/)
{
    int rc;
    hipStream_t stream = 0;
    try
    {
        printf("Selecting the best GPU\n");
        selectGPU();
        rc = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
        if (hipSuccess != rc) throw "Unable to create CUDA stream";

        const char* jpegPath = "cars-back.jpg";
        printf("Loading \"%s\"\n", jpegPath);
        JpegCodec codec;
        codec.prepare(WIDTH, HEIGHT, 3, 90);
        {
            hipMalloc(&imageBuffer, WIDTH * HEIGHT * 3);
            File jpeg;
            jpeg.readAll(jpegPath);
#if USE_NVJPEG
            codec.decodeToDeviceMemoryGPU(imageBuffer, jpeg.data(), jpeg.size(), stream);
#else
            codec.decodeToDeviceMemoryCPU(imageBuffer, jpeg.data(), jpeg.size(), stream);
#endif
            hipStreamSynchronize(stream);
        }
        // copy to output folder
        const char* modelPath = "../../models/ssd.engine";
        const char* prototxt = "../../models/ssd.prototxt";
        const char* caffemodel= "../../models/ssd.caffemodel";
        printf("Loading \"%s\"\n", modelPath);
        Model model(modelPath, prototxt, caffemodel);
        printf("Creating screen\n");
        CudaDisplay display(TITLE, WIDTH, HEIGHT);
        hipDeviceSynchronize();

        dim3 blockSize = { 16, 16 };
        dim3 gridSize = {
            (WIDTH + blockSize.x - 1) / blockSize.x,
            (HEIGHT + blockSize.y - 1) / blockSize.y
        };
        dim3 gridSize300 = {
            (300 + blockSize.x - 1) / blockSize.x,
            (300 + blockSize.y - 1) / blockSize.y
        };
        display.cudaMap(stream);
        while (true)
        {
            hipEvent_t start, stop;
            hipEventCreate(&start);
            hipEventCreate(&stop);
#if 0
            hipLaunchKernelGGL(( f_test), dim3(gridSize), dim3(blockSize), 0, stream,
                display.CUDA.frame.data,
                display.CUDA.frame.pitch,
                display.CUDA.frame.width,
                display.CUDA.frame.height
            );
#endif
            // Prepare the 300x300 network input and the on-screen frame.
            hipLaunchKernelGGL(( f_normalize), dim3(gridSize300), dim3(blockSize), 0, stream,
                (float*)model.inputFrame.data,
                imageBuffer
            );
            hipLaunchKernelGGL(( f_jpeg), dim3(gridSize), dim3(blockSize), 0, stream,
                display.CUDA.frame.data,
                display.CUDA.frame.pitch,
                imageBuffer
            );
            hipEventRecord(start,stream);
            model.infer(stream);
            hipEventRecord(stop,stream);
            // Copy the bounding boxes to host memory to display them
            // on the command line
            uint32_t count;
            hipMemcpyAsync(&count, model.keepCount.data, sizeof(uint32_t), hipMemcpyDeviceToHost, stream);
            /* BUGFIX: `count` was read below before the async copy above was
             * guaranteed to finish; synchronize before sizing the buffer. */
            hipStreamSynchronize(stream);
            size_t bsize = count * 7 * sizeof(float);
            float* boxes = (float*)malloc(bsize);
            hipMemcpyAsync(boxes, model.boxesFrame.data, bsize, hipMemcpyDeviceToHost, stream);
            // Draw the boxes (from device memory)
            hipLaunchKernelGGL(( f_bbox), dim3(gridSize), dim3(blockSize), 0, stream,
                display.CUDA.frame.data,
                display.CUDA.frame.pitch,
                (float*) model.boxesFrame.data,
                (uint32_t*) model.keepCount.data);
            // This is also done by display.cudaFinish in this example.
            hipStreamSynchronize(stream);
            // Here we know all drawing has been done, and all memcpy's are finished
            // due to the synchronize above.
            // Draw the pixelbuffer on screen
            display.cudaFinish(stream);
            display.render(stream);

            float ms;
            hipEventElapsedTime(&ms, start, stop);
            hipEventDestroy(start);
            hipEventDestroy(stop);
            printf("Number of boxes: %d\n", count);
            for (size_t i=0; i<count; i++)
            {
                size_t bidx = 7 * i;
                float prob = boxes[bidx + 2];
                if (prob < MIN_PROB || prob > 1) continue;
                float clas = boxes[bidx + 1];
                float minx = boxes[bidx + 3] * WIDTH;
                float miny = boxes[bidx + 4] * WIDTH;
                float maxx = boxes[bidx + 5] * WIDTH;
                float maxy = boxes[bidx + 6] * WIDTH;
                const char* className = classNames[(uint32_t)clas];
                printf("%0.02f%% [[%0.01f, %0.01f],[%0.01f, %0.01f]] => %s\n", prob*100, minx, miny, maxx, maxy, className);
            }
            free(boxes);
            printf("inference time: %0.04f ms\n\n", ms);
            rc = hipGetLastError();
            if (hipSuccess != rc) throw "CUDA ERROR";
            // check escape pressed
            if (display.events())
            {
                display.cudaUnmap(stream);
                hipStreamDestroy(stream);
                return 0;
            }
            usleep(1000);
        }
    }
    catch (const char* &ex)
    {
        fprintf(stderr, "ERROR: %s\n", ex);
        fflush(stderr);
        return 1;
    }
    return 0;
}
| 1c2fe86da856033822583bfb226af64827a8bf3e.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <math.h>
#include <sys/stat.h>
#if USE_NVJPEG
#include <nvjpeg.h>
#else
#include <jpeglib.h>
#endif
#define WIDTH 1920
#define HEIGHT 1080
#include <display.h>
#include <pthread.h>
#include <math.h>
#include <inference.h>
#include <operators.h>
#include <asyncwork.h>
#include <jpegcodec.h>
#include <file.h>
#ifndef TITLE
#define TITLE "CUDA INFERENCE DEMO"
#endif
#define MIN_PROB 0.4
#ifndef USE_NVJPEG
#define USE_NVJPEG 0
#endif
//width and height defines come from inference.h at the moment
static uint8_t* imageBuffer = {0};
const char* const classNames[] = {
"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa","train", "tvmonitor"
};
// Debug pattern: fills the pitched RGBA surface with a horizontal red and a
// vertical green gradient (blue = 0, alpha = 1). One thread per pixel.
__global__
void f_test(float4* out, int pitch_out, int width, int height)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < width && row < height) {
        // pitch_out is in bytes; the original byte-based index is kept as-is
        out[row * pitch_out / sizeof(float4) + col] =
            make_float4((float) col / width, (float) row / height, 0, 1);
    }
}
// RGB interleaved as 3 byte tupels
// Converts the interleaved 8-bit RGB image (3 bytes per pixel) into the
// float4 display surface, scaling channels to 0..1 with alpha = 1.
__global__
void f_jpeg(float4* out, int pitch_out, uint8_t* rgb)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= WIDTH || y >= HEIGHT) return;

    const uint8_t* px = rgb + (y * WIDTH + x) * 3;  // start of this pixel's triple
    out[y * pitch_out / sizeof(float4) + x] = make_float4(
        px[0] / 255.0f,
        px[1] / 255.0f,
        px[2] / 255.0f,
        1);
}
#define SDIV (WIDTH / 300.0f)
// Resamples the WIDTHxHEIGHT interleaved RGB image down to the 300x300
// planar network input: nearest-neighbour scaling, channel order reversed
// (indices 2,1,0) and a per-channel constant subtracted. Out-of-image
// samples are written as 0.
__global__
void f_normalize(float* normalized, uint8_t* rgb)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= 300 || y >= 300) return;

    const int srcX = (int) (x * SDIV);
    const int srcY = (int) (y * SDIV);
    const bool inside = srcX < WIDTH && srcY < HEIGHT;

    const size_t src = srcY * WIDTH + srcX;  // source pixel index
    const size_t plane = 300 * 300;          // one channel plane in the output
    const size_t dst = y * 300 + x;

    normalized[dst + 0 * plane] = inside ? rgb[src * 3 + 2] - 104.0f : 0;
    normalized[dst + 1 * plane] = inside ? rgb[src * 3 + 1] - 117.0f : 0;
    normalized[dst + 2 * plane] = inside ? rgb[src * 3 + 0] - 123.0f : 0;
}
/*
 * Overlays a translucent, class-colored rectangle on the display surface for
 * every detection that passes the score / class-id filters.
 *
 * Launch: one thread per output pixel (2D grid over WIDTH x HEIGHT).
 *   out      - pitched float4 RGBA frame (pitch_out in bytes)
 *   boxes    - device array of detections, 7 floats per box; as used here:
 *              [1] = class id, [2] = score, [3..6] = normalized corners
 *              (element [0] is not read by this kernel)
 *   nboxes   - device pointer to the number of valid entries in `boxes`
 */
__global__
void f_bbox(float4* out, int pitch_out, float* boxes, uint32_t* nboxes)
{
    int x = (blockIdx.x * blockDim.x + threadIdx.x);
    int y = (blockIdx.y * blockDim.y + threadIdx.y);
    if (x >= WIDTH || y >= HEIGHT) return;
    int classification = 0;
    for (int i=0; i<*nboxes; i++)
    {
        float* box = boxes + i * 7;
        /* skip low-confidence or out-of-range scores */
        if (box[2] < MIN_PROB || box[2] > 1.0) continue;
        /* skip background (0) and unknown class ids */
        if (box[1] <= 0 || box[1] >= 21) continue;
        /* NOTE(review): all four corners, including the y-coordinates, are
         * scaled by WIDTH. The host-side printout does the same, but confirm
         * HEIGHT was not intended for miny/maxy. */
        float minx = box[3] * WIDTH;
        float miny = box[4] * WIDTH;
        float maxx = box[5] * WIDTH;
        float maxy = box[6] * WIDTH;
        if (x < minx || x > maxx || y < miny || y > maxy) continue;
        classification = box[1];
        float alpha = 0.4;
        /* map the class id onto a sine color wheel and alpha-blend it in */
        float4 color = classification ? make_float4(
            alpha/2 + alpha/2 * __sinf((classification/20.0f+0.00f) * 2 * M_PI),
            alpha/2 + alpha/2 * __sinf((classification/20.0f+0.33f) * 2 * M_PI),
            alpha/2 + alpha/2 * __sinf((classification/20.0f+0.66f) * 2 * M_PI),
            alpha) : make_float4(0,0,0,0);
        int idx = y * pitch_out/sizeof(float4) + x;
        out[idx] = out[idx] * (1-color.w) + color;
    }
    /* dead code kept from an earlier single-blend version */
#if 0
    float alpha = 0.4;
    float4 color = classification ? make_float4(
        alpha/2 + alpha/2 * __sinf((classification/20.0f+0.00f) * 2 * M_PI),
        alpha/2 + alpha/2 * __sinf((classification/20.0f+0.33f) * 2 * M_PI),
        alpha/2 + alpha/2 * __sinf((classification/20.0f+0.66f) * 2 * M_PI),
        alpha) : make_float4(0,0,0,0);
    int idx = y * pitch_out/sizeof(float4) + x;
    out[idx] = out[idx] * (1-color.w) + color;
#endif
}
// Maps a compute capability (major.minor) to the number of cores per SM.
// The 9999.9999 sentinel (device emulation) yields 1; unknown capabilities
// yield 0 so callers can detect them.
int smToCores(int major, int minor)
{
    const int sm = (major << 4) | minor;
    if (sm == ((9999 << 4) | 9999))
        return 1;
    switch (sm)
    {
        case 0x30: case 0x32: case 0x35: case 0x37:
            return 192;              // Kepler
        case 0x50: case 0x52: case 0x53:
            return 128;              // Maxwell
        case 0x60:
            return 64;               // Pascal GP100
        case 0x61: case 0x62:
            return 128;              // Pascal
        case 0x70: case 0x72: case 0x75:
            return 64;               // Volta / Turing
        case 0x80: case 0x86:
            return 64;               // Ampere
        default:
            return 0;                // unknown capability
    }
}
/*
 * Picks the CUDA device with the highest estimated throughput
 * (multiprocessors x cores-per-SM x clock rate) and makes it current.
 * Throws a C-string on any CUDA error or when no usable device exists.
 */
void selectGPU()
{
    int rc;
    int maxId = -1;
    /* BUGFIX: the score below is a uint64_t in the billions; keeping the best
     * score in a uint16_t truncated it and could select the wrong device. */
    uint64_t maxScore = 0;
    int count = 0;
    cudaDeviceProp prop;
    rc = cudaGetDeviceCount(&count);
    if (cudaSuccess != rc) throw "cudaGetDeviceCount error";
    if (count == 0) throw "No suitable cuda device found";
    for (int id = 0; id < count; id++)
    {
        rc = cudaGetDeviceProperties(&prop, id);
        if (cudaSuccess != rc) throw "Unable to get device properties";
        /* devices in prohibited compute mode cannot be used */
        if (prop.computeMode == cudaComputeModeProhibited)
        {
            printf("GPU %d: PROHIBITED\n", id);
            continue;
        }
        int sm_per_multiproc = smToCores(prop.major, prop.minor);
        printf("GPU %d: \"%s\"\n", id, prop.name);
        printf(" - Compute capability: %d.%d\n", prop.major, prop.minor);
        printf(" - Multiprocessors: %d\n", prop.multiProcessorCount);
        printf(" - SMs per processor: %d\n", sm_per_multiproc);
        printf(" - Clock rate: %d\n", prop.clockRate);
        uint64_t score =(uint64_t) prop.multiProcessorCount * sm_per_multiproc * prop.clockRate;
        if (score > maxScore)
        {
            maxId = id;
            maxScore = score;
        }
    }
    if (maxId < 0) throw "All cuda devices prohibited";
    rc = cudaSetDevice(maxId);
    if (cudaSuccess != rc) throw "Unable to set cuda device";
    rc = cudaGetDeviceProperties(&prop, maxId);
    if (cudaSuccess != rc) throw "Unable to get device properties";
    printf("\nSelected GPU %d: \"%s\" with compute capability %d.%d\n\n",
        maxId, prop.name, prop.major, prop.minor);
}
/*
 * Demo entry point: decodes a JPEG into device memory, runs SSD inference on
 * it in a loop, overlays the detected boxes on a GPU-backed display, and
 * prints the detections and per-iteration timing.
 */
int main(int /*argc*/, char** /*argv*/)
{
    int rc;
    cudaStream_t stream = 0;
    try
    {
        printf("Selecting the best GPU\n");
        selectGPU();
        rc = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
        if (cudaSuccess != rc) throw "Unable to create CUDA stream";

        const char* jpegPath = "cars-back.jpg";
        printf("Loading \"%s\"\n", jpegPath);
        JpegCodec codec;
        codec.prepare(WIDTH, HEIGHT, 3, 90);
        {
            cudaMalloc(&imageBuffer, WIDTH * HEIGHT * 3);
            File jpeg;
            jpeg.readAll(jpegPath);
#if USE_NVJPEG
            codec.decodeToDeviceMemoryGPU(imageBuffer, jpeg.data(), jpeg.size(), stream);
#else
            codec.decodeToDeviceMemoryCPU(imageBuffer, jpeg.data(), jpeg.size(), stream);
#endif
            cudaStreamSynchronize(stream);
        }
        // copy to output folder
        const char* modelPath = "../../models/ssd.engine";
        const char* prototxt = "../../models/ssd.prototxt";
        const char* caffemodel= "../../models/ssd.caffemodel";
        printf("Loading \"%s\"\n", modelPath);
        Model model(modelPath, prototxt, caffemodel);
        printf("Creating screen\n");
        CudaDisplay display(TITLE, WIDTH, HEIGHT);
        cudaDeviceSynchronize();

        dim3 blockSize = { 16, 16 };
        dim3 gridSize = {
            (WIDTH + blockSize.x - 1) / blockSize.x,
            (HEIGHT + blockSize.y - 1) / blockSize.y
        };
        dim3 gridSize300 = {
            (300 + blockSize.x - 1) / blockSize.x,
            (300 + blockSize.y - 1) / blockSize.y
        };
        display.cudaMap(stream);
        while (true)
        {
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
#if 0
            f_test<<<gridSize, blockSize, 0, stream>>>(
                display.CUDA.frame.data,
                display.CUDA.frame.pitch,
                display.CUDA.frame.width,
                display.CUDA.frame.height
            );
#endif
            // Prepare the 300x300 network input and the on-screen frame.
            f_normalize<<<gridSize300, blockSize, 0, stream>>>(
                (float*)model.inputFrame.data,
                imageBuffer
            );
            f_jpeg<<<gridSize, blockSize, 0, stream>>>(
                display.CUDA.frame.data,
                display.CUDA.frame.pitch,
                imageBuffer
            );
            cudaEventRecord(start,stream);
            model.infer(stream);
            cudaEventRecord(stop,stream);
            // Copy the bounding boxes to host memory to display them
            // on the command line
            uint32_t count;
            cudaMemcpyAsync(&count, model.keepCount.data, sizeof(uint32_t), cudaMemcpyDeviceToHost, stream);
            /* BUGFIX: `count` was read below before the async copy above was
             * guaranteed to finish; synchronize before sizing the buffer. */
            cudaStreamSynchronize(stream);
            size_t bsize = count * 7 * sizeof(float);
            float* boxes = (float*)malloc(bsize);
            cudaMemcpyAsync(boxes, model.boxesFrame.data, bsize, cudaMemcpyDeviceToHost, stream);
            // Draw the boxes (from device memory)
            f_bbox<<<gridSize, blockSize, 0, stream>>>(
                display.CUDA.frame.data,
                display.CUDA.frame.pitch,
                (float*) model.boxesFrame.data,
                (uint32_t*) model.keepCount.data);
            // This is also done by display.cudaFinish in this example.
            cudaStreamSynchronize(stream);
            // Here we know all drawing has been done, and all memcpy's are finished
            // due to the synchronize above.
            // Draw the pixelbuffer on screen
            display.cudaFinish(stream);
            display.render(stream);

            float ms;
            cudaEventElapsedTime(&ms, start, stop);
            cudaEventDestroy(start);
            cudaEventDestroy(stop);
            printf("Number of boxes: %d\n", count);
            for (size_t i=0; i<count; i++)
            {
                size_t bidx = 7 * i;
                float prob = boxes[bidx + 2];
                if (prob < MIN_PROB || prob > 1) continue;
                float clas = boxes[bidx + 1];
                float minx = boxes[bidx + 3] * WIDTH;
                float miny = boxes[bidx + 4] * WIDTH;
                float maxx = boxes[bidx + 5] * WIDTH;
                float maxy = boxes[bidx + 6] * WIDTH;
                const char* className = classNames[(uint32_t)clas];
                printf("%0.02f%% [[%0.01f, %0.01f],[%0.01f, %0.01f]] => %s\n", prob*100, minx, miny, maxx, maxy, className);
            }
            free(boxes);
            printf("inference time: %0.04f ms\n\n", ms);
            rc = cudaGetLastError();
            if (cudaSuccess != rc) throw "CUDA ERROR";
            // check escape pressed
            if (display.events())
            {
                display.cudaUnmap(stream);
                cudaStreamDestroy(stream);
                return 0;
            }
            usleep(1000);
        }
    }
    catch (const char* &ex)
    {
        fprintf(stderr, "ERROR: %s\n", ex);
        fflush(stderr);
        return 1;
    }
    return 0;
}
|
bdf1ccd3f166819c6cda57e80464fe1f7ee42e50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define ANTS 1600
#define WIDTH 512
#define HEIGHT 512
/*
* Osnovna struktura koja sadrzi podatke o svakoj poziciji slike
*/
__device__ position deviceImage[HEIGHT][WIDTH];
/*
* Dio memorijskog prostora za teksture namijenjen cuvanju vrijednosti sivih razni slike
*/
texture<float, 2, hipReadModeElementType> imageValuesTexture;
/*
* Dio memorijskog prostora za teksture namijenjen cuvanju heuristickih vrijednosti
* svake pozicije
*/
texture<float, 2, hipReadModeElementType> heuristicsTexture;
/*
 * Seeds one RNG state per thread; every thread receives a distinct seed and
 * subsequence so the per-ant generators are independent.
 */
__global__ void setupRandomsGenerator(hiprandState_t *state, unsigned long int seed){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init(seed + tid, tid, 0, &state[tid]);
}
/*
 * Normalizes the pitched grey-level image (divides every value by maxValue)
 * and resets each position's pheromone level, ant counter and coordinates.
 *
 * NOTE(review): there is no bounds guard, so the launch grid must cover the
 * HEIGHT x WIDTH image exactly. The pitched row offset uses j (the second
 * grid coordinate) while deviceImage is indexed [i][j] -- confirm this
 * row/column mapping matches how `values` was allocated.
 */
__global__ void init(float *values, size_t pitch, float maxValue){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    /* pitch is in bytes, hence the char* arithmetic */
    float *q = (float *)((char *)values + j * pitch) + i;
    *q /= maxValue;
    deviceImage[i][j].pheromone = 0.001;  /* initial pheromone level */
    deviceImage[i][j].antCount = 0;
    deviceImage[i][j].i = i;
    deviceImage[i][j].j = j;
}
/*
 * Computes the visibility (heuristic) value of every pixel and records the
 * list of valid neighbours for each position.
 *
 * The heuristic is the pixel's grey value scaled by the strongest absolute
 * intensity difference across the four line directions through the pixel
 * (two diagonals, horizontal, vertical); out-of-image samples fall back to
 * the centre value.
 */
__global__ void setHeuristics(float *heuristics, int pitch){
    float tl, tm, tr;
    float ml, mr;
    float bl, bm, br;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    float intens[4];
    float current = tex2D(imageValuesTexture, i, j);
    tl = (i - 1 >= 0 && j - 1 >= 0) ? tex2D(imageValuesTexture, i - 1, j - 1) : current;
    /* BUGFIX: the bottom-right sample used to re-read the top-left texel
     * (i-1, j-1), which made the tl/br gradient identically zero. The
     * boundary tests also used `<= HEIGHT`/`<= WIDTH` (one past the last
     * valid index); they now match the neighbour checks below. */
    br = (i + 1 < HEIGHT && j + 1 < WIDTH) ? tex2D(imageValuesTexture, i + 1, j + 1) : current;
    tr = (i - 1 >= 0 && j + 1 < WIDTH) ? tex2D(imageValuesTexture, i - 1, j + 1) : current;
    bl = (i + 1 < HEIGHT && j - 1 >= 0) ? tex2D(imageValuesTexture, i + 1, j - 1) : current;
    tm = (i - 1 >= 0) ? tex2D(imageValuesTexture, i - 1, j) : current;
    bm = (i + 1 < HEIGHT) ? tex2D(imageValuesTexture, i + 1, j) : current;
    ml = (j - 1 >= 0) ? tex2D(imageValuesTexture, i, j - 1) : current;
    mr = (j + 1 < WIDTH) ? tex2D(imageValuesTexture, i, j + 1) : current;
    /* intensity variation along the two diagonals, horizontal and vertical */
    intens[0] = fabs(tl - br);
    intens[1] = fabs(tr - bl);
    intens[2] = fabs(ml - mr);
    intens[3] = fabs(tm - bm);
    float max = intens[0];
    for (int k = 1; k < 4; ++k) {
        max = max > intens[k] ? max : intens[k];
    }
    float *currentHeuristicValue = (float *)((char *)heuristics + j * pitch) + i;
    *currentHeuristicValue = current * max;
    /* collect the in-bounds 8-neighbourhood of (i, j) */
    int index = 0;
    if (i - 1 >= 0 && j - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i-1][j-1];
    if (i + 1 < HEIGHT && j + 1 < WIDTH) deviceImage[i][j].neigh[index++] = &deviceImage[i+1][j+1];
    if (i - 1 >= 0 && j + 1 < WIDTH) deviceImage[i][j].neigh[index++] = &deviceImage[i-1][j+1];
    if (i + 1 < HEIGHT && j - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i+1][j-1];
    if (i - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i-1][j];
    if (i + 1 < HEIGHT) deviceImage[i][j].neigh[index++] = &deviceImage[i+1][j];
    if (j - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i][j-1];
    if (j + 1 < WIDTH) deviceImage[i][j].neigh[index++] = &deviceImage[i][j+1];
    deviceImage[i][j].neighCount = index;
}
/*
 * Places the ants at random starting positions (at most one ant per pixel)
 * and constructs each ant with a random parameter.
 */
__global__ void setAnts(ant *ants, hiprandState_t *states){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= ANTS) return;
    /* work on a local RNG copy; written back at the end */
    hiprandState_t localState = states[id];
    /* construct the ant with a value in [26, 40]; meaning defined by ant's ctor */
    ants[id] = ant(hiprand(&localState) % 15 + 25 + 1);
    int i = hiprand(&localState) % HEIGHT;
    int j = hiprand(&localState) % WIDTH;
    /* atomicCAS(ptr, 1, 1) is used as an atomic read of antCount: keep
     * redrawing until an unoccupied pixel (antCount == 0) is found.
     * NOTE(review): the probe and the atomicAdd below are not atomic as a
     * whole, so two threads could still claim the same pixel -- confirm
     * this is acceptable. */
    while (atomicCAS(&deviceImage[i][j].antCount, 1, 1)){
        i = hiprand(&localState) % HEIGHT;
        j = hiprand(&localState) % WIDTH;
    }
    atomicAdd(&deviceImage[i][j].antCount, 1);
    ants[id].path.push_back(&deviceImage[i][j]);
    /* persist the advanced RNG state */
    states[id] = localState;
}
/*
 * Advances one ant by a single step: builds the set of admissible neighbour
 * positions, weights them by pheromone^4 * heuristic^2, and picks the next
 * position by roulette-wheel selection.
 */
__global__ void walk(ant *ants, hiprandState_t *states){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= ANTS) return;
    int admissibleCount = 0;
    position *admissible[8];           /* candidate next positions (<= 8 neighbours) */
    position *last = ants[id].path.last();
    double probabilities[8];           /* unnormalized selection weights */
    double probSum = 0;
    /* work on a local RNG copy; written back at the end */
    hiprandState_t localState = states[id];
    if (ants[id].path.count == 1){
        /* first step: every neighbour of the start position is admissible */
        for (int i = 0; i < last->neighCount; ++i){
            admissible[i] = last->neigh[i];
        }
        admissibleCount = last->neighCount;
        for (int i = 0; i < admissibleCount; ++i){
            position *tmp = admissible[i];
            /* weight = pheromone^alpha * heuristic^beta with alpha=4, beta=2 */
            probabilities[i] = powf(tmp->pheromone, 4) * powf(tex2D(heuristicsTexture, tmp->i, tmp->j), 2);
            probSum += probabilities[i];
        }
    }
    else {
        /* later steps: exclude already-visited positions and the previous one */
        position *penultimate = ants[id].path.penultimate();
        for (int i = 0; i < last->neighCount; ++i){
            if (ants[id].path.contains(last->neigh[i])) continue;
            if (last->neigh[i] == penultimate) continue;
            admissible[admissibleCount++] = last->neigh[i];
        }
        for (int i = 0; i < admissibleCount; ++i){
            position *tmp = admissible[i];
            probabilities[i] = powf(tmp->pheromone, 4) * powf(tex2D(heuristicsTexture, tmp->i, tmp->j), 2);
            probSum += probabilities[i];
        }
    }
    /* roulette-wheel selection over the accumulated weights */
    double r = hiprand_uniform_double(&localState) * probSum;
    double acumulatedSum = 0;
    position *next = 0;
    for (int i = 0; i < admissibleCount; ++i){
        acumulatedSum += probabilities[i];
        if (r < acumulatedSum) {
            next = admissible[i];
            break;
        }
    }
    /* fallbacks: numerical edge cases pick the last candidate; with no
     * admissible neighbour at all, restart from the path's first position */
    if (!next){
        if (admissibleCount) next = admissible[--admissibleCount];
        else {
            next = ants[id].path[0];
        }
    }
    atomicAdd(&next->antCount, 1);
    ants[id].push_back(next);
    /* persist the advanced RNG state */
    states[id] = localState;
}
/*
 * Evaporates the pheromone on every position (4% per iteration), deposits
 * new pheromone proportional to the heuristic value and the number of ant
 * visits, and clears the visit counter for the next iteration.
 */
__global__ void updateTrails(){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    float h = tex2D(heuristicsTexture, i, j);
    /* only sufficiently "visible" positions receive a deposit */
    float deposit = (h >= 0.08) ? h * deviceImage[i][j].antCount : 0;
    deviceImage[i][j].pheromone = deviceImage[i][j].pheromone * (1 - 0.04) + deposit;
    deviceImage[i][j].antCount = 0;
}
| bdf1ccd3f166819c6cda57e80464fe1f7ee42e50.cu | #define ANTS 1600
#define WIDTH 512
#define HEIGHT 512
/*
* Osnovna struktura koja sadrzi podatke o svakoj poziciji slike
*/
__device__ position deviceImage[HEIGHT][WIDTH];
/*
* Dio memorijskog prostora za teksture namijenjen cuvanju vrijednosti sivih razni slike
*/
texture<float, 2, cudaReadModeElementType> imageValuesTexture;
/*
* Dio memorijskog prostora za teksture namijenjen cuvanju heuristickih vrijednosti
* svake pozicije
*/
texture<float, 2, cudaReadModeElementType> heuristicsTexture;
/*
 * Kernel that seeds one cuRAND generator state per thread.
 */
__global__ void setupRandomsGenerator(curandState *state, unsigned long int seed){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Distinct seed and subsequence per thread -> independent random streams.
    curand_init(seed + tid, tid, 0, &state[tid]);
}
/*
 * Kernel that normalises the image grey levels and resets every position's
 * pheromone trail and bookkeeping to their initial values.
 */
__global__ void init(float *values, size_t pitch, float maxValue){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Scale the grey level stored in the pitched buffer into [0, 1].
    float *entry = (float *)((char *)values + j * pitch) + i;
    *entry /= maxValue;
    // Seed the trail with a small uniform pheromone level.
    deviceImage[i][j].pheromone = 0.001;
    deviceImage[i][j].antCount = 0;
    deviceImage[i][j].i = i;
    deviceImage[i][j].j = j;
}
/*
 * Kernel that computes the visibility (heuristic) value of every position and
 * records pointers to the neighbouring positions of the current position.
 * One thread per pixel of the HEIGHT x WIDTH image.
 */
__global__ void setHeuristics(float *heuristics, int pitch){
    float tl, tm, tr;
    float ml, mr;
    float bl, bm, br;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    float intens[4];
    float current = tex2D(imageValuesTexture, i, j);
    // Sample the 8-neighbourhood, substituting the centre value at borders.
    // Fixes vs. the original: `br` fetched (i-1, j-1) -- a copy-paste of the
    // `tl` fetch -- instead of (i+1, j+1), and the corner guards used `<=`
    // where every edge guard uses `<`, reading one texel past the image.
    tl = (i - 1 >= 0 && j - 1 >= 0) ? tex2D(imageValuesTexture, i - 1, j - 1) : current;
    br = (i + 1 < HEIGHT && j + 1 < WIDTH) ? tex2D(imageValuesTexture, i + 1, j + 1) : current;
    tr = (i - 1 >= 0 && j + 1 < WIDTH) ? tex2D(imageValuesTexture, i - 1, j + 1) : current;
    bl = (i + 1 < HEIGHT && j - 1 >= 0) ? tex2D(imageValuesTexture, i + 1, j - 1) : current;
    tm = (i - 1 >= 0) ? tex2D(imageValuesTexture, i - 1, j) : current;
    bm = (i + 1 < HEIGHT) ? tex2D(imageValuesTexture, i + 1, j) : current;
    ml = (j - 1 >= 0) ? tex2D(imageValuesTexture, i, j - 1) : current;
    mr = (j + 1 < WIDTH) ? tex2D(imageValuesTexture, i, j + 1) : current;
    // Intensity differences across the two diagonals, horizontal and vertical.
    intens[0] = fabs(tl - br);
    intens[1] = fabs(tr - bl);
    intens[2] = fabs(ml - mr);
    intens[3] = fabs(tm - bm);
    float max = intens[0];
    for (int k = 1; k < 4; ++k) {
        max = max > intens[k] ? max : intens[k];
    }
    // Heuristic = grey level scaled by the strongest local gradient.
    float *currentHeuristicValue = (float *)((char *)heuristics + j * pitch) + i;
    *currentHeuristicValue = current * max;
    // Record pointers to every in-bounds neighbour of (i, j).
    int index = 0;
    if (i - 1 >= 0 && j - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i-1][j-1];
    if (i + 1 < HEIGHT && j + 1 < WIDTH) deviceImage[i][j].neigh[index++] = &deviceImage[i+1][j+1];
    if (i - 1 >= 0 && j + 1 < WIDTH) deviceImage[i][j].neigh[index++] = &deviceImage[i-1][j+1];
    if (i + 1 < HEIGHT && j - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i+1][j-1];
    if (i - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i-1][j];
    if (i + 1 < HEIGHT) deviceImage[i][j].neigh[index++] = &deviceImage[i+1][j];
    if (j - 1 >= 0) deviceImage[i][j].neigh[index++] = &deviceImage[i][j-1];
    if (j + 1 < WIDTH) deviceImage[i][j].neigh[index++] = &deviceImage[i][j+1];
    deviceImage[i][j].neighCount = index;
}
/*
 * Kernel that initialises the ants: each ant gets a random path capacity and
 * claims a distinct random start cell.
 */
__global__ void setAnts(ant *ants, curandState *states){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= ANTS) return;
    curandState localState = states[id];
    // Random path capacity in [26, 40].
    ants[id] = ant(curand(&localState) % 15 + 25 + 1);
    int i = curand(&localState) % HEIGHT;
    int j = curand(&localState) % WIDTH;
    // Claim a free cell atomically: CAS(0 -> 1) succeeds for exactly one
    // thread per cell. The original tested occupancy with atomicCAS(count,1,1)
    // and then incremented with a separate atomicAdd, so two ants racing on
    // the same empty cell could both pass the test and share a start cell.
    while (atomicCAS(&deviceImage[i][j].antCount, 0, 1) != 0){
        i = curand(&localState) % HEIGHT;
        j = curand(&localState) % WIDTH;
    }
    ants[id].path.push_back(&deviceImage[i][j]);
    states[id] = localState;
}
/*
 * One walk step per ant: pick the next position with a roulette-wheel draw
 * weighted by pheromone^4 * heuristic^2, then move the ant there.
 * Launch with at least ANTS threads (one thread per ant).
 */
__global__ void walk(ant *ants, curandState *states){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= ANTS) return;
    int admissibleCount = 0;
    position *admissible[8];                 // a cell has at most 8 neighbours
    position *last = ants[id].path.last();   // ant's current position
    double probabilities[8];
    double probSum = 0;
    curandState localState = states[id];     // RNG state copied into registers
    if (ants[id].path.count == 1){
        // First step: every neighbour of the start cell is admissible.
        for (int i = 0; i < last->neighCount; ++i){
            admissible[i] = last->neigh[i];
        }
        admissibleCount = last->neighCount;
        for (int i = 0; i < admissibleCount; ++i){
            position *tmp = admissible[i];
            // Selection weight: pheromone^4 * heuristic^2.
            probabilities[i] = powf(tmp->pheromone, 4) * powf(tex2D(heuristicsTexture, tmp->i, tmp->j), 2);
            probSum += probabilities[i];
        }
    }
    else {
        // Later steps: exclude cells already on the path and the previous cell.
        position *penultimate = ants[id].path.penultimate();
        for (int i = 0; i < last->neighCount; ++i){
            if (ants[id].path.contains(last->neigh[i])) continue;
            if (last->neigh[i] == penultimate) continue;
            admissible[admissibleCount++] = last->neigh[i];
        }
        for (int i = 0; i < admissibleCount; ++i){
            position *tmp = admissible[i];
            probabilities[i] = powf(tmp->pheromone, 4) * powf(tex2D(heuristicsTexture, tmp->i, tmp->j), 2);
            probSum += probabilities[i];
        }
    }
    // Roulette-wheel selection over the admissible neighbours.
    double r = curand_uniform_double(&localState) * probSum;
    double acumulatedSum = 0;
    position *next = 0;
    for (int i = 0; i < admissibleCount; ++i){
        acumulatedSum += probabilities[i];
        if (r < acumulatedSum) {
            next = admissible[i];
            break;
        }
    }
    if (!next){
        // Fallback when round-off (or all-zero weights) selected nothing:
        // take the last admissible cell, else restart at the path's first cell.
        if (admissibleCount) next = admissible[--admissibleCount];
        else {
            next = ants[id].path[0];
        }
    }
    atomicAdd(&next->antCount, 1);   // count this ant on its destination cell
    ants[id].push_back(next);
    states[id] = localState;         // persist the advanced RNG state
}
/*
 * Kernel that updates the pheromone trails: evaporate, then deposit
 * proportionally to the ants that visited each position this iteration.
 */
__global__ void updateTrails(){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    float sum = 0;
    float heuristicValue = tex2D(heuristicsTexture, i, j);
    // Only positions with a sufficiently strong heuristic receive deposits.
    if (heuristicValue >= 0.08) {
        sum = heuristicValue * deviceImage[i][j].antCount;
    }
    // Evaporate 4% of the existing trail, add this iteration's deposit,
    // and reset the per-iteration visit counter.
    deviceImage[i][j].pheromone = deviceImage[i][j].pheromone * (1 - 0.04) + sum;
    deviceImage[i][j].antCount = 0;
}
|
936e4b90bd5a7c1f8182e183a5f581c307a8ba16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@generated from magmablas/zswap.cu, normal z -> d, Tue Aug 30 09:38:33 2016
*/
#include "magma_internal.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
   Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void dswap_kernel(
    int n,
    double *x, int incx,
    double *y, int incy )
{
    int ind = threadIdx.x + blockDim.x*blockIdx.x;
    if ( ind >= n )
        return;                       // grid tail: nothing to swap
    double *px = x + ind*incx;        // this thread's element of x
    double *py = y + ind*incy;        // this thread's element of y
    double held = *px;
    *px = *py;
    *py = held;
}
/***************************************************************************//**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx DOUBLE PRECISION array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy DOUBLE PRECISION array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swap
*******************************************************************************/
extern "C" void
magmablas_dswap_q(
    magma_int_t n,
    magmaDouble_ptr dx, magma_int_t incx,
    magmaDouble_ptr dy, magma_int_t incy,
    magma_queue_t queue )
{
    // One thread per element: NB threads per block, ceil(n/NB) blocks,
    // launched asynchronously on the queue's stream.
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( n, NB ) );
    hipLaunchKernelGGL(( dswap_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dx, incx, dy, incy );
}
| 936e4b90bd5a7c1f8182e183a5f581c307a8ba16.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@author Mark Gates
@generated from magmablas/zswap.cu, normal z -> d, Tue Aug 30 09:38:33 2016
*/
#include "magma_internal.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
   Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void dswap_kernel(
    int n,
    double *x, int incx,
    double *y, int incy )
{
    int ind = threadIdx.x + blockDim.x*blockIdx.x;
    if ( ind >= n )
        return;                       // grid tail: nothing to swap
    double *px = x + ind*incx;        // this thread's element of x
    double *py = y + ind*incy;        // this thread's element of y
    double held = *px;
    *px = *py;
    *py = held;
}
/***************************************************************************//**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx DOUBLE PRECISION array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy DOUBLE PRECISION array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swap
*******************************************************************************/
extern "C" void
magmablas_dswap_q(
    magma_int_t n,
    magmaDouble_ptr dx, magma_int_t incx,
    magmaDouble_ptr dy, magma_int_t incy,
    magma_queue_t queue )
{
    // One thread per element: NB threads per block, ceil(n/NB) blocks,
    // launched asynchronously on the queue's stream.
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( n, NB ) );
    dswap_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dx, incx, dy, incy );
}
|
8ed255e1de0341449c2bc74fdaf83c648603d074.hip | // !!! This is a file automatically generated by hipify!!!
/**
* This is a material that always returns a black color. It is attached on
* instances that have no other materials (e.g. pure lights) and wrapped by
* the NoReflect class.
*/
#include <optix.h>
#include <optix_cuda.h>
#include <optix_world.h>
#include "core.cuh"
// Black material: the BSDF is identically zero for every direction pair.
RT_CALLABLE_PROGRAM float3 evalBSDFLocal(
    const float3& incoming,
    const float3& outgoing
) {
    const float3 black = make_float3(0);
    return black;
}
// Black material: no direction is ever sampled, so the PDF is zero everywhere.
RT_CALLABLE_PROGRAM float evalPDFLocal(
    const float3& incoming,
    const float3& outgoing
) {
    const float zeroDensity = 0.0f;
    return zeroDensity;
}
// Black material: sampling yields a degenerate all-zero sample
// (zero direction, zero throughput, zero density).
RT_CALLABLE_PROGRAM void sampleLocal(
    hiprandState_t* rng,
    const float3& incoming,
    float3* outgoingOut,
    float3* bsdfOut,
    float* pdfOut
) {
    const float3 zero = make_float3(0);
    *outgoingOut = zero;
    *bsdfOut = zero;
    *pdfOut = 0.0f;
}
| 8ed255e1de0341449c2bc74fdaf83c648603d074.cu | /**
* This is a material that always returns a black color. It is attached on
* instances that have no other materials (e.g. pure lights) and wrapped by
* the NoReflect class.
*/
#include <optix.h>
#include <optix_cuda.h>
#include <optix_world.h>
#include "core.cuh"
// Black material: the BSDF is identically zero for every direction pair.
RT_CALLABLE_PROGRAM float3 evalBSDFLocal(
    const float3& incoming,
    const float3& outgoing
) {
    const float3 black = make_float3(0);
    return black;
}
// Black material: no direction is ever sampled, so the PDF is zero everywhere.
RT_CALLABLE_PROGRAM float evalPDFLocal(
    const float3& incoming,
    const float3& outgoing
) {
    const float zeroDensity = 0.0f;
    return zeroDensity;
}
// Black material: sampling yields a degenerate all-zero sample
// (zero direction, zero throughput, zero density).
RT_CALLABLE_PROGRAM void sampleLocal(
    curandState* rng,
    const float3& incoming,
    float3* outgoingOut,
    float3* bsdfOut,
    float* pdfOut
) {
    const float3 zero = make_float3(0);
    *outgoingOut = zero;
    *bsdfOut = zero;
    *pdfOut = 0.0f;
}
|
e8235e23c32d3c9822e69c30891ce63547f7e181.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check for CUDA errors; print (with an optional source line) and exit
 * with a failure status if there was a problem.
 */
void checkCUDAError(const char *msg, int line = -1) {
    hipError_t status = hipGetLastError();
    if (status == hipSuccess) {
        return;                       // nothing pending, fast path
    }
    if (line >= 0) {
        fprintf(stderr, "Line %d: ", line);
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 1024
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3* dev_sorted_pos;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// 32-bit integer mix usable from both host and device; scrambles the input
// so nearby seeds produce uncorrelated values.
__host__ __device__ unsigned int hash(unsigned int a) {
    unsigned int x = a;
    x = (x + 0x7ed55d16) + (x << 12);
    x = (x ^ 0xc761c23c) ^ (x >> 19);
    x = (x + 0x165667b1) + (x << 5);
    x = (x + 0xd3a2646c) ^ (x << 9);
    x = (x + 0xfd7046c5) + (x << 3);
    x = (x ^ 0xb55a4f09) ^ (x >> 16);
    return x;
}
/**
 * LOOK-1.2 - this is a typical helper function for a CUDA kernel.
 * Function for generating a random vec3 with components uniform in [-1, 1],
 * seeded from the (time, index) pair so each boid gets its own stream.
 * Note: the three distribution draws are constructor arguments, so their
 * evaluation order (hence component assignment) is compiler-defined.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
    thrust::default_random_engine rng(hash((int)(index * time)));
    thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
    return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
 * LOOK-1.2 - This is a basic CUDA kernel.
 * Scatters N boids uniformly at random inside a cube of half-width `scale`.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;                       // guard the grid tail
    }
    glm::vec3 unit = generateRandomVec3(time, index);
    arr[index] = scale * unit;        // component-wise scale into the scene cube
}
/**
 * Initialize memory, update some globals.
 * Allocates the position/velocity ping-pong buffers, seeds random positions,
 * derives the uniform-grid parameters, and allocates the grid index buffers.
 */
void Boids::initSimulation(int N) {
    numObjects = N;
    dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
    // LOOK-1.2 - This is basic CUDA memory management and error checking.
    // Don't forget to hipFree in Boids::endSimulation.
    hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
    hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
    hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
    // LOOK-1.2 - Seed every boid with a random position in the scene cube.
    hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
        dev_pos, scene_scale);
    checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
    // LOOK-2.1 computing grid params: cell width equals the largest rule
    // radius, and the grid spans the scene cube symmetrically around origin.
    gridCellWidth = 1.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
    int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
    gridSideCount = 2 * halfSideCount;
    gridCellCount = gridSideCount * gridSideCount * gridSideCount;
    gridInverseCellWidth = 1.0f / gridCellWidth;
    float halfGridWidth = gridCellWidth * halfSideCount;
    gridMinimum.x -= halfGridWidth;
    gridMinimum.y -= halfGridWidth;
    gridMinimum.z -= halfGridWidth;
    // TODO-2.1 TODO-2.3 - Allocate additional buffers here.
    hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
    hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
    hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
    hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
    checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
    hipMalloc((void**)&dev_sorted_pos, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("hipMalloc sorted_dev_pos failed!");
    // Thrust wrappers used for sort_by_key over the grid/array index pair.
    dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
    dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
    hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
 * Copy the boid positions into the VBO so that they can be drawn by OpenGL.
 * Each boid writes one xyzw tuple, rescaled by -1/s_scale.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    float c_scale = -1.0f / s_scale;
    if (index >= N) {
        return;
    }
    float *out = vbo + 4 * index;     // this boid's xyzw slot
    out[0] = pos[index].x * c_scale;
    out[1] = pos[index].y * c_scale;
    out[2] = pos[index].z * c_scale;
    out[3] = 1.0f;
}
// Copy the boid velocities into the VBO, offset by 0.3 per component so the
// values land in a displayable colour range.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N) {
        return;
    }
    float *out = vbo + 4 * index;     // this boid's xyzw slot
    out[0] = vel[index].x + 0.3f;
    out[1] = vel[index].y + 0.3f;
    out[2] = vel[index].z + 0.3f;
    out[3] = 1.0f;
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
 * Fills the position and velocity VBO pointers from the device buffers.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
    kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
    checkCUDAErrorWithLine("copyBoidsToVBO failed!");
    // Block until both copies land so OpenGL can read the VBOs immediately.
    hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
 * __device__ code can be called from a __global__ context
 * Compute the new velocity on the body with index `iSelf` due to the `N` boids
 * in the `pos` and `vel` arrays. Brute force: scans every other boid once,
 * accumulating all three flocking rules in a single pass.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
    // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
    // Rule 2: boids try to stay a distance d away from each other
    // Rule 3: boids try to match the speed of surrounding boids
    glm::vec3 finalVel = glm::vec3(0.f), perceivedCenter = finalVel, c = finalVel, perceivedVel = finalVel;
    int rule1Neighbors = 0, rule3Neighbors = 0;
    glm::vec3 boidPos = pos[iSelf];
    for (int i = 0; i < N; i++) {
        if (i != iSelf) {
            glm::vec3 bPos = pos[i];
            float dist = glm::distance(bPos, boidPos);
            // rule1: cohesion -- accumulate neighbour positions for the mean
            if (dist < rule1Distance) {
                rule1Neighbors++;
                perceivedCenter += bPos;
            }
            // rule2: separation -- push away from very close neighbours
            if (dist < rule2Distance) {
                c -= bPos - boidPos;
            }
            // rule3: alignment -- accumulate neighbour velocities for the mean
            if (dist < rule3Distance) {
                rule3Neighbors++;
                perceivedVel += vel[i];
            }
        }
    }
    if (rule1Neighbors != 0) {
        perceivedCenter /= rule1Neighbors;
        finalVel += (perceivedCenter - boidPos) * rule1Scale;
    }
    finalVel += c * rule2Scale;
    // alignment
    if (rule3Neighbors != 0) {
        perceivedVel /= rule3Neighbors;
        finalVel += perceivedVel * rule3Scale;
    }
    // Return the unclamped updated velocity; the caller clamps to maxSpeed.
    return vel[iSelf] + finalVel;
}
/**
 * Basic flocking: for each of the `N` boids, compute its new velocity from
 * the three rules and write it into vel2. vel1 is still being read by other
 * threads this step, so results must go to the second (ping-pong) buffer.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
    glm::vec3 *vel1, glm::vec3 *vel2) {
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= N) {
        return;
    }
    glm::vec3 newVel = computeVelocityChange(N, idx, pos, vel1);
    // Clamp the speed to maxSpeed while preserving direction.
    if (glm::length(newVel) > maxSpeed) {
        newVel = maxSpeed * glm::normalize(newVel);
    }
    vel2[idx] = newVel;
}
/**
 * LOOK-1.2 Since this is pretty trivial, we implemented it for you.
 * For each of the `N` bodies, integrate position by velocity over `dt`,
 * wrapping coordinates that leave the [-scene_scale, scene_scale] cube.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N) {
        return;
    }
    glm::vec3 p = pos[index] + vel[index] * dt;
    // Wrap the boids around so we don't lose them.
    if (p.x < -scene_scale) p.x = scene_scale;
    if (p.y < -scene_scale) p.y = scene_scale;
    if (p.z < -scene_scale) p.z = scene_scale;
    if (p.x > scene_scale) p.x = -scene_scale;
    if (p.y > scene_scale) p.y = -scene_scale;
    if (p.z > scene_scale) p.z = -scene_scale;
    pos[index] = p;
}
// LOOK-2.1 / LOOK-2.3: flatten a 3D cell coordinate to a 1D index. x varies
// fastest, then y, then z -- so iterating z outermost and x innermost walks
// the flattened array contiguously.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
    // Horner form of x + y*R + z*R*R (identical integer result).
    return x + gridResolution * (y + gridResolution * z);
}
// Overload taking the cell coordinate as a glm::vec3 (components are expected
// to be whole numbers from glm::floor; the float sum is truncated on return).
__device__ int gridIndex3Dto1D(glm::vec3 gridIndex3D, int gridResolution) {
    return gridIndex3D.x + gridIndex3D.y * gridResolution + gridIndex3D.z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
    glm::vec3 gridMin, float inverseCellWidth,
    glm::vec3 *pos, int *indices, int *gridIndices) {
    // Label each boid with the 1D index of the grid cell it occupies, and
    // record its own array index, so both arrays can be sorted together by
    // cell and used as pointers back into pos/vel1/vel2.
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    // Cell coordinate = floor of the position relative to the grid origin.
    glm::vec3 cellIdx3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
    gridIndices[index] = gridIndex3Dto1D(cellIdx3D, gridResolution);
    indices[index] = index;
}
// LOOK-2.1: fill an int buffer with a sentinel value -- used to mark grid
// cells that enclose no boids (e.g. -1 in the start/end index buffers).
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    intBuffer[index] = value;
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
    int *gridCellStartIndices, int *gridCellEndIndices) {
    // Identify, for every occupied cell, the [start, end] range it occupies in
    // the cell-sorted particleGridIndices array: a parallel unrolling of
    // "this index doesn't match the one before it, must be a new cell!".
    // Fix vs. the original: the index == N-1 case returned early, so when the
    // last particle opened a new cell its start marker (and the previous
    // cell's end marker) was never written, and with N == 1 no end marker was
    // written at all. The last particle must both close its own cell and still
    // perform the boundary check against its predecessor.
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    int cellIndex = particleGridIndices[index];
    if (index == 0) {
        // First particle always starts its cell.
        gridCellStartIndices[cellIndex] = 0;
    } else {
        int cellIndexBefore = particleGridIndices[index - 1];
        if (cellIndex != cellIndexBefore) {
            // Boundary between two cells: close the previous, open this one.
            gridCellEndIndices[cellIndexBefore] = index - 1;
            gridCellStartIndices[cellIndex] = index;
        }
    }
    if (index == N - 1) {
        // Last particle always closes its cell.
        gridCellEndIndices[cellIndex] = index;
    }
}
// Collect the 1D indices of the (up to) 8 grid cells whose contents could be
// within the flocking search radius of the boid, by checking which side of
// its cell the boid sits on along each axis. Unused slots stay -1.
// Fix vs. the original: `int xDir, yDir, zDir = 0;` zero-initialised only
// zDir, leaving xDir and yDir as garbage on the code paths where the border
// checks keep them unset.
// NOTE(review): this still returns a pointer to a function-local array, which
// is dangling after return and only works if the compiler inlines the call.
// The signature cannot change without breaking callers; callers should
// migrate to a caller-provided output buffer.
__device__ int* getNeighborCells8(int gridResolution, glm::vec3 gridMin, float cellWidth,
    glm::vec3 cellIdx3D, glm::vec3 boidPos, int* gridCellStartIndices, int* gridCellEndIndices) {
    const int size = 8;
    int neighborCellIdxs[size];
    for (int i = 0; i < size; i++) {
        neighborCellIdxs[i] = -1;            // -1 marks an unused slot
    }
    int tracker = -1;
    int xDir = 0, yDir = 0, zDir = 0;        // which neighbouring half-space to visit per axis
    float searchDist = imax(imax(rule1Distance, rule2Distance), rule3Distance);
    glm::vec3 cornerPos = gridMin + cellIdx3D * cellWidth;   // cell's minimum corner
    // Pick the neighbour direction along each axis, clamped at the grid border.
    if (boidPos.x - searchDist < cornerPos.x) {
        if (cellIdx3D.x != 0) {
            xDir = -1;
        }
    } else {
        if (cellIdx3D.x != gridResolution - 1) {
            xDir = 1;
        }
    }
    if (boidPos.y - searchDist < cornerPos.y) {
        if (cellIdx3D.y != 0) {
            yDir = -1;
        }
    }
    else {
        if (cellIdx3D.y != gridResolution - 1) {
            yDir = 1;
        }
    }
    if (boidPos.z - searchDist < cornerPos.z) {
        if (cellIdx3D.z != 0) {
            zDir = -1;
        }
    }
    else {
        if (cellIdx3D.z != gridResolution - 1) {
            zDir = 1;
        }
    }
    // Enumerate the selected octant: own cell plus each axis combination.
    glm::vec3 neighborCellIdx3D = cellIdx3D;
    neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    if (zDir != 0) {
        neighborCellIdx3D.z += zDir;
        neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    }
    if (xDir != 0) {
        glm::vec3 neighborCellIdx3D = cellIdx3D;
        neighborCellIdx3D.x += xDir;
        neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
        if (zDir != 0) {
            neighborCellIdx3D.z += zDir;
            neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
        }
    }
    if (yDir != 0) {
        glm::vec3 neighborCellIdx3D = cellIdx3D;
        neighborCellIdx3D.y += yDir;
        neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
        if (zDir != 0) {
            neighborCellIdx3D.z += zDir;
            neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
        }
    }
    if (xDir != 0 && yDir != 0) {
        glm::vec3 neighborCellIdx3D = cellIdx3D;
        neighborCellIdx3D.x += xDir;
        neighborCellIdx3D.y += yDir;
        neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
        if (zDir != 0) {
            neighborCellIdx3D.z += zDir;
            neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
        }
    }
    return neighborCellIdxs;
}
// Collect the 1D indices of all in-bounds cells in the 3x3x3 neighbourhood of
// cellIdx3D into a fixed 27-slot array; unused slots stay -1. (boidPos and the
// start/end index arrays are accepted but never read here.)
// NOTE(review): returns a pointer to a function-local array -- dangling after
// return; it appears to work only when the compiler inlines this __device__
// function into its callers. A caller-provided output buffer would be safe.
__device__ int* getNeighborCells27(int gridResolution, glm::vec3 gridMin, float cellWidth,
    glm::vec3 cellIdx3D, glm::vec3 boidPos, int* gridCellStartIndices, int* gridCellEndIndices) {
    const int size = 27;
    int neighborCellIdxs[size];
    for (int i = 0; i < size; i++) {
        neighborCellIdxs[i] = -1;
    }
    int tracker = -1;
    // z outermost, x innermost: x varies fastest in the flattened index, so
    // consecutive slots reference memory-adjacent cells.
    for (int k = cellIdx3D.z - 1; k <= cellIdx3D.z + 1; k++) {
        for (int j = cellIdx3D.y - 1; j <= cellIdx3D.y + 1; j++) {
            for (int i = cellIdx3D.x - 1; i <= cellIdx3D.x + 1; i++) {
                if (i < 0 || j < 0 || k < 0 || i >= gridResolution || j >= gridResolution || k >= gridResolution) {
                    continue;
                }
                glm::vec3 neighborCellIdx3D(i, j, k);
                neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
            }
        }
    }
    return neighborCellIdxs;
}
__global__ void kernUpdateVelNeighborSearchScattered(
    int N, int gridResolution, glm::vec3 gridMin,
    float inverseCellWidth, float cellWidth,
    int *gridCellStartIndices, int *gridCellEndIndices,
    int *particleArrayIndices,
    glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
    // Uniform-grid flocking update ("scattered" layout): instead of scanning
    // all N boids, only the boids stored in the (up to) 27 neighbouring grid
    // cells are examined. particleArrayIndices adds one level of indirection
    // from the cell-sorted rank back to the boid's slot in pos/vel1.
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    glm::vec3 finalVel = glm::vec3(0.f), perceivedCenter = finalVel, c = finalVel, perceivedVel = finalVel;
    int rule1Neighbors = 0, rule3Neighbors = 0;
    glm::vec3 cellIdx3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
    int cellIndex = gridIndex3Dto1D(cellIdx3D, gridResolution);   // NOTE(review): computed but unused
    glm::vec3 boidPos = pos[index];
    // NOTE(review): relies on getNeighborCells27 being inlined -- the returned
    // pointer addresses that function's local array.
    int* neighborCellIdxs = getNeighborCells27(gridResolution, gridMin, cellWidth, cellIdx3D, boidPos,
        gridCellStartIndices, gridCellEndIndices);
    for (int i = 0; i < 27; i++) {
        int cellIdx = neighborCellIdxs[i];
        if (cellIdx == -1) {
            continue;                        // slot unused (cell off-grid)
        }
        if (gridCellStartIndices[cellIdx] == -1) {
            continue;                        // cell encloses no boids
        }
        for (int j = gridCellStartIndices[cellIdx]; j <= gridCellEndIndices[cellIdx]; j++) {
            int boidIndex = particleArrayIndices[j];
            if (boidIndex != index) {
                glm::vec3 bPos = pos[boidIndex];
                float dist = glm::distance(bPos, boidPos);
                // rule1: cohesion
                if (dist < rule1Distance) {
                    rule1Neighbors++;
                    perceivedCenter += bPos;
                }
                // rule2: separation
                if (dist < rule2Distance) {
                    c -= bPos - boidPos;
                }
                // rule3: alignment
                if (dist < rule3Distance) {
                    rule3Neighbors++;
                    perceivedVel += vel1[boidIndex];
                }
            }
        }
    }
    if (rule1Neighbors != 0) {
        perceivedCenter /= rule1Neighbors;
        finalVel += (perceivedCenter - boidPos) * rule1Scale;
    }
    finalVel += c * rule2Scale;
    // alignment
    if (rule3Neighbors != 0) {
        perceivedVel /= rule3Neighbors;
        finalVel += perceivedVel * rule3Scale;
    }
    finalVel += vel1[index];
    // Clamp the speed before writing into the ping-pong buffer.
    if (glm::length(finalVel) > maxSpeed) {
        finalVel = glm::normalize(finalVel) * maxSpeed;
    }
    vel2[index] = finalVel;
}
// Gather positions and velocities into cell-sorted order: output element
// `index` receives the data of the boid ranked `index` by grid cell, so the
// coherent search kernel can read pos/vel contiguously per cell.
__global__ void kernSortPosVel(int N, int* particleArrayIndices, glm::vec3* pos,
    glm::vec3 *sortPos, glm::vec3 *vel1, glm::vec3 *vel2) {
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    int src = particleArrayIndices[index];   // original slot of this rank
    sortPos[index] = pos[src];
    vel2[index] = vel1[src];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
    int N, int gridResolution, glm::vec3 gridMin,
    float inverseCellWidth, float cellWidth,
    int *gridCellStartIndices, int *gridCellEndIndices,
    glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
    // Uniform-grid flocking update ("coherent" layout): like the scattered
    // version but with one less level of indirection -- pos and vel1 are
    // already reshuffled into cell-sorted order, so gridCellStartIndices /
    // gridCellEndIndices index them directly and neighbour reads within a
    // cell are contiguous in memory.
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= N) {
        return;
    }
    glm::vec3 finalVel = glm::vec3(0.f), perceivedCenter = finalVel, c = finalVel, perceivedVel = finalVel;
    int rule1Neighbors = 0, rule3Neighbors = 0;
    glm::vec3 cellIdx3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
    int cellIndex = gridIndex3Dto1D(cellIdx3D, gridResolution);   // NOTE(review): computed but unused
    glm::vec3 boidPos = pos[index];
    // NOTE(review): relies on getNeighborCells27 being inlined -- the returned
    // pointer addresses that function's local array.
    int* neighborCellIdxs = getNeighborCells27(gridResolution, gridMin, cellWidth, cellIdx3D, boidPos,
        gridCellStartIndices, gridCellEndIndices);
    for (int i = 0; i < 27; i++) {
        int cellIdx = neighborCellIdxs[i];
        if (cellIdx == -1) {
            continue;                        // slot unused (cell off-grid)
        }
        if (gridCellStartIndices[cellIdx] == -1) {
            continue;                        // cell encloses no boids
        }
        for (int j = gridCellStartIndices[cellIdx]; j <= gridCellEndIndices[cellIdx]; j++) {
            // j indexes the sorted buffers directly -- no particleArrayIndices.
            if (j != index) {
                glm::vec3 bPos = pos[j];
                float dist = glm::distance(bPos, boidPos);
                // rule1: cohesion
                if (dist < rule1Distance) {
                    rule1Neighbors++;
                    perceivedCenter += bPos;
                }
                // rule2: separation
                if (dist < rule2Distance) {
                    c -= bPos - boidPos;
                }
                // rule3: alignment
                if (dist < rule3Distance) {
                    rule3Neighbors++;
                    perceivedVel += vel1[j];
                }
            }
        }
    }
    if (rule1Neighbors != 0) {
        perceivedCenter /= rule1Neighbors;
        finalVel += (perceivedCenter - boidPos) * rule1Scale;
    }
    finalVel += c * rule2Scale;
    // alignment
    if (rule3Neighbors != 0) {
        perceivedVel /= rule3Neighbors;
        finalVel += perceivedVel * rule3Scale;
    }
    finalVel += vel1[index];
    // Clamp the speed before writing into the ping-pong buffer.
    if (glm::length(finalVel) > maxSpeed) {
        finalVel = glm::normalize(finalVel) * maxSpeed;
    }
    vel2[index] = finalVel;
}
/**
 * Step the entire N-body simulation by `dt` seconds (brute-force O(N^2) path).
 * Velocities are read from dev_vel1 and written to dev_vel2, then the buffers
 * are ping-ponged.
 */
void Boids::stepSimulationNaive(float dt) {
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects,
        dev_pos, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
    kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");
    // Ping-pong by swapping the buffer pointers. The original issued a
    // device-to-device hipMemcpy of all N velocities every frame, which is
    // pure overhead -- swapping pointers is equivalent and free.
    glm::vec3 *tmp = dev_vel1;
    dev_vel1 = dev_vel2;
    dev_vel2 = tmp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// One step of the uniform-grid ("scattered") simulation:
// 1) label each boid with its grid-cell index, 2) sort boid indices by cell,
// 3) find each cell's [start, end] run, 4) grid-based velocity update,
// 5) integrate positions, 6) publish new velocities for the next step.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 fullBlocksPerGridCells((gridCellCount + blockSize - 1) / blockSize);
// Label each boid with its cell index and its own array index.
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// Sort boid array indices by cell so each cell's boids are contiguous.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
checkCUDAErrorWithLine("thrust sort failed!");
// -1 marks a cell with no boids. NOTE(review): only the start buffer is
// reset to -1 each step; the end buffer is never reset — confirm readers
// always gate on start == -1 first.
kernResetIntBuffer << <fullBlocksPerGridCells, blockSize >> > (gridCellCount,
dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// Velocity update reads dev_vel1 and writes dev_vel2 (ping-pong).
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// ping-pong: make the new velocities the next step's input.
hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // One step of the cell-coherent uniform-grid simulation. Same pipeline as
  // the scattered version, plus a gather (kernSortPosVel) that reorders
  // pos/vel into cell order so the neighbor search reads memory coherently.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGridCells((gridCellCount + blockSize - 1) / blockSize);
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
    dev_thrust_particleArrayIndices);
  checkCUDAErrorWithLine("thrust sort failed!");
  kernResetIntBuffer << <fullBlocksPerGridCells, blockSize >> > (gridCellCount,
    dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Gather: dev_sorted_pos / dev_vel2 receive pos / vel1 in cell order.
  kernSortPosVel << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos,
    dev_sorted_pos, dev_vel1, dev_vel2);
  // FIX: this launch was previously unchecked, unlike every other one here.
  checkCUDAErrorWithLine("kernSortPosVel failed!");
  // Reads sorted velocities (dev_vel2), writes new velocities into dev_vel1.
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_sorted_pos, dev_vel2, dev_vel1);
  // FIX: error message previously said "Scattered" (copy-paste slip).
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_sorted_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Publish: new velocities into both ping-pong buffers, and the sorted,
  // integrated positions back into the canonical dev_pos.
  hipMemcpy(dev_vel2, dev_vel1, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
  hipMemcpy(dev_pos, dev_sorted_pos, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
  // Release every device allocation made in Boids::initSimulation,
  // in reverse order of allocation.
  hipFree(dev_sorted_pos);
  hipFree(dev_gridCellEndIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_particleArrayIndices);
  hipFree(dev_pos);
  hipFree(dev_vel2);
  hipFree(dev_vel1);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// Demonstrates thrust::sort_by_key on a small key/value pair set:
// copies host data to the device, sorts values by key, copies back,
// and prints before/after so the reordering can be inspected by eye.
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
// Deliberately duplicated keys (four 0s, two 2s) — an unstable sort may
// order their values arbitrarily within each key group.
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device pointers in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| e8235e23c32d3c9822e69c30891ce63547f7e181.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check for CUDA errors; print (with an optional line number) and exit the
 * process if one is pending.
 */
void checkCUDAError(const char *msg, int line = -1) {
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess == err) {
    return;  // nothing pending — fast path
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 1024
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3* dev_sorted_pos;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer mixing function: scrambles the bits of `a` through a fixed
// sequence of add/shift/xor rounds so nearby inputs map to widely
// separated outputs. Used below to seed per-thread RNGs.
__host__ __device__ unsigned int hash(unsigned int a) {
  unsigned int h = a;
  h = (h + 0x7ed55d16) + (h << 12);
  h = (h ^ 0xc761c23c) ^ (h >> 19);
  h = (h + 0x165667b1) + (h << 5);
  h = (h + 0xd3a2646c) ^ (h << 9);
  h = (h + 0xfd7046c5) + (h << 3);
  h = (h ^ 0xb55a4f09) ^ (h >> 16);
  return h;
}
/**
 * LOOK-1.2 - this is a typical helper function for a CUDA kernel.
 * Generate a pseudo-random vec3 with each component drawn from [-1, 1),
 * deterministically seeded from hash(index * time) so each thread gets an
 * independent, reproducible stream.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
/**
 * LOOK-1.2 - This is a basic CUDA kernel.
 * Fill arr[0..N) with random positions, each component scaled into
 * [-scale, scale). One thread per element; out-of-range threads exit early.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;  // guard the grid tail
  }
  glm::vec3 rand = generateRandomVec3(time, index);
  arr[index].x = scale * rand.x;
  arr[index].y = scale * rand.y;
  arr[index].z = scale * rand.z;
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
// Allocate all device buffers, scatter N boids at random positions, and
// derive the uniform-grid parameters used by the grid-based updates.
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params. Cell width equals the largest rule
// radius (1x), so a neighbor search must inspect adjacent cells too.
gridCellWidth = 1.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// gridMinimum is a file-scope global (static storage), so these -= ops
// offset from zero, yielding the grid's most-negative corner.
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&dev_sorted_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc sorted_dev_pos failed!");
// Thrust wrappers over the raw device pointers, used for sort_by_key.
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
 * Copy the boid positions into the VBO (4 floats per boid, w = 1) so they
 * can be drawn by OpenGL. Positions are multiplied by -1/s_scale.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float c_scale = -1.0f / s_scale;
  float *out = vbo + 4 * index;
  out[0] = pos[index].x * c_scale;
  out[1] = pos[index].y * c_scale;
  out[2] = pos[index].z * c_scale;
  out[3] = 1.0f;
}
/**
 * Copy the boid velocities into the VBO (4 floats per boid, w = 1), with a
 * +0.3 offset on each component (used as a display color bias).
 */
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  float *out = vbo + 4 * index;
  out[0] = vel[index].x + 0.3f;
  out[1] = vel[index].y + 0.3f;
  out[2] = vel[index].z + 0.3f;
  out[3] = 1.0f;
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
 * Writes current positions and velocities (from dev_vel1) into the two
 * OpenGL VBO pointers and blocks until both copies complete.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
// Ensure the VBOs are fully written before the host/GL side uses them.
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
 * __device__ code can be called from a __global__ context
 * Compute the new velocity on the body with index `iSelf` due to the `N` boids
 * in the `pos` and `vel` arrays. O(N) scan over all boids, applying:
 *   Rule 1 (cohesion): steer toward the mean position of boids within
 *     rule1Distance;
 *   Rule 2 (separation): steer away from boids within rule2Distance;
 *   Rule 3 (alignment): steer toward the mean velocity of boids within
 *     rule3Distance.
 * Returns the boid's current velocity plus the scaled sum of the three
 * contributions (not clamped here — the caller clamps to maxSpeed).
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 finalVel = glm::vec3(0.f), perceivedCenter = finalVel, c = finalVel, perceivedVel = finalVel;
int rule1Neighbors = 0, rule3Neighbors = 0;
glm::vec3 boidPos = pos[iSelf];
for (int i = 0; i < N; i++) {
if (i != iSelf) {
glm::vec3 bPos = pos[i];
float dist = glm::distance(bPos, boidPos);
// rule1: cohesion — accumulate neighbor positions for averaging below
if (dist < rule1Distance) {
rule1Neighbors++;
perceivedCenter += bPos;
}
// separation — push directly away from each too-close neighbor
if (dist < rule2Distance) {
c -= bPos - boidPos;
}
// alignment — accumulate neighbor velocities for averaging below
if (dist < rule3Distance) {
rule3Neighbors++;
perceivedVel += vel[i];
}
}
}
// Average the cohesion target only if any neighbor was in range
// (avoids dividing by zero).
if (rule1Neighbors != 0) {
perceivedCenter /= rule1Neighbors;
finalVel += (perceivedCenter - boidPos) * rule1Scale;
}
finalVel += c * rule2Scale;
// alignment
if (rule3Neighbors != 0) {
perceivedVel /= rule3Neighbors;
finalVel += perceivedVel * rule3Scale;
}
return vel[iSelf] + finalVel;
}
/**
 * Brute-force velocity update: each thread recomputes its boid's velocity
 * against all N boids via computeVelocityChange, clamps the result to
 * maxSpeed, and stores it in vel2. vel2 (not vel1) is written so all
 * threads keep reading a consistent vel1 snapshot during the pass.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  int tid = threadIdx.x + (blockIdx.x * blockDim.x);
  if (tid >= N) {
    return;
  }
  glm::vec3 newVel = computeVelocityChange(N, tid, pos, vel1);
  // Clamp to the global speed limit.
  if (glm::length(newVel) > maxSpeed) {
    newVel = glm::normalize(newVel) * maxSpeed;
  }
  vel2[tid] = newVel;
}
/**
 * LOOK-1.2 Since this is pretty trivial, we implemented it for you.
 * Advance each boid's position by vel * dt, wrapping any coordinate that
 * leaves the [-scene_scale, scene_scale] cube around to the opposite face.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Toroidal wrap so boids never escape the simulation cube. The two
  // conditions per axis are mutually exclusive, so if/else is equivalent
  // to the original pair of ternaries.
  if (p.x < -scene_scale) p.x = scene_scale;
  else if (p.x > scene_scale) p.x = -scene_scale;
  if (p.y < -scene_scale) p.y = scene_scale;
  else if (p.y > scene_scale) p.y = -scene_scale;
  if (p.z < -scene_scale) p.z = scene_scale;
  else if (p.z > scene_scale) p.z = -scene_scale;
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flatten a 3D cell coordinate into a 1D index, x varying fastest, then y,
// then z (row-major within each z-slab). Horner form of the original
// x + y*res + z*res*res.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  return x + gridResolution * (y + gridResolution * z);
}
// Overload taking a glm::vec3 of (x, y, z) cell coordinates. The arithmetic
// is done in float and the sum is truncated toward zero on conversion to
// int at the return, so callers must pass non-negative, already-floored
// coordinates.
__device__ int gridIndex3Dto1D(glm::vec3 gridIndex3D, int gridResolution) {
return gridIndex3D.x + gridIndex3D.y * gridResolution + gridIndex3D.z * gridResolution * gridResolution;
}
// Label each boid with the 1D index of the grid cell containing it
// (written to gridIndices) and with its own array position (written to
// indices); the latter is the value array for the subsequent sort-by-key.
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// Cell coordinate = floor of the position offset from the grid's minimum
// corner, scaled by 1/cellWidth.
glm::vec3 cellIdx3D = glm::floor((pos[index] - gridMin) * inverseCellWidth);
gridIndices[index] = gridIndex3Dto1D(cellIdx3D, gridResolution);
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fill intBuffer[0..N) with `value`. Used each step with value = -1 to mark
// grid cells that contain no boids.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
// Given particleGridIndices sorted by cell, record for each cell the first
// and last array positions (inclusive) of its run of boids. Parallel
// unrolling of "this index differs from the previous one, so a new cell
// starts here".
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int cellIndex = particleGridIndices[index];
  if (index == 0) {
    // First element always opens its cell's run.
    gridCellStartIndices[cellIndex] = 0;
  } else {
    int cellIndexBefore = particleGridIndices[index - 1];
    if (cellIndex != cellIndexBefore) {
      // Boundary between runs: close the previous cell, open this one.
      gridCellEndIndices[cellIndexBefore] = index - 1;
      gridCellStartIndices[cellIndex] = index;
    }
  }
  // BUG FIX: the original returned early for index == N - 1 *before* the
  // boundary test above, so when the last boid opened a new cell, that
  // cell's start (and the previous cell's end) were never written; with
  // N == 1 no end index was written at all. The last element always closes
  // its cell's run.
  if (index == N - 1) {
    gridCellEndIndices[cellIndex] = index;
  }
}
// Collect the 1D indices of up to 8 grid cells — the boid's own cell plus
// the cells toward whichever corner of the cell the boid's search radius
// spills into — clamped at the grid boundary. Unused slots stay -1.
// NOTE(review): this returns a pointer to a function-local array, which is
// undefined behavior unless the call is fully inlined by the compiler; the
// safe fix is to have the caller supply the output buffer, but that would
// change the interface, so it is only flagged here.
__device__ int* getNeighborCells8(int gridResolution, glm::vec3 gridMin, float cellWidth,
  glm::vec3 cellIdx3D, glm::vec3 boidPos, int* gridCellStartIndices, int* gridCellEndIndices) {
  const int size = 8;
  int neighborCellIdxs[size];
  for (int i = 0; i < size; i++) {
    neighborCellIdxs[i] = -1;
  }
  int tracker = -1;
  // BUG FIX: the original `int xDir, yDir, zDir = 0;` initialized only
  // zDir, leaving xDir and yDir as uninitialized garbage. All three offset
  // flags must start at 0 (meaning "no spill on this axis").
  int xDir = 0, yDir = 0, zDir = 0;
  float searchDist = imax(imax(rule1Distance, rule2Distance), rule3Distance);
  glm::vec3 cornerPos = gridMin + cellIdx3D * cellWidth;
  // Per axis: if the search sphere crosses the cell's lower face, look at
  // the -1 neighbor; otherwise look at the +1 neighbor — clamped so we
  // never step outside the grid.
  if (boidPos.x - searchDist < cornerPos.x) {
    if (cellIdx3D.x != 0) {
      xDir = -1;
    }
  } else {
    if (cellIdx3D.x != gridResolution - 1) {
      xDir = 1;
    }
  }
  if (boidPos.y - searchDist < cornerPos.y) {
    if (cellIdx3D.y != 0) {
      yDir = -1;
    }
  }
  else {
    if (cellIdx3D.y != gridResolution - 1) {
      yDir = 1;
    }
  }
  if (boidPos.z - searchDist < cornerPos.z) {
    if (cellIdx3D.z != 0) {
      zDir = -1;
    }
  }
  else {
    if (cellIdx3D.z != gridResolution - 1) {
      zDir = 1;
    }
  }
  // Enumerate the (up to) 8 cells of the selected octant: own cell, then
  // each combination of the chosen x/y/z offsets, each with and without
  // the z offset.
  glm::vec3 neighborCellIdx3D = cellIdx3D;
  neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
  if (zDir != 0) {
    neighborCellIdx3D.z += zDir;
    neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
  }
  if (xDir != 0) {
    glm::vec3 neighborCellIdx3D = cellIdx3D;
    neighborCellIdx3D.x += xDir;
    neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    if (zDir != 0) {
      neighborCellIdx3D.z += zDir;
      neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    }
  }
  if (yDir != 0) {
    glm::vec3 neighborCellIdx3D = cellIdx3D;
    neighborCellIdx3D.y += yDir;
    neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    if (zDir != 0) {
      neighborCellIdx3D.z += zDir;
      neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    }
  }
  if (xDir != 0 && yDir != 0) {
    glm::vec3 neighborCellIdx3D = cellIdx3D;
    neighborCellIdx3D.x += xDir;
    neighborCellIdx3D.y += yDir;
    neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    if (zDir != 0) {
      neighborCellIdx3D.z += zDir;
      neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
    }
  }
  return neighborCellIdxs;
}
// Collect the 1D indices of the full 3x3x3 block of grid cells centered on
// cellIdx3D, skipping cells outside the grid (their slots stay -1). Iterates
// z outermost / x innermost so the resulting indices are in ascending 1D
// order, which gives the coherent search a more sequential access pattern.
// NOTE(review): this returns a pointer to a function-local array — formally
// undefined behavior that only works if the compiler fully inlines the call
// into the kernels below; refactoring to a caller-supplied buffer would be
// the safe fix. gridCellStartIndices/gridCellEndIndices are accepted but
// unused here.
__device__ int* getNeighborCells27(int gridResolution, glm::vec3 gridMin, float cellWidth,
glm::vec3 cellIdx3D, glm::vec3 boidPos, int* gridCellStartIndices, int* gridCellEndIndices) {
const int size = 27;
int neighborCellIdxs[size];
for (int i = 0; i < size; i++) {
neighborCellIdxs[i] = -1;
}
int tracker = -1;
for (int k = cellIdx3D.z - 1; k <= cellIdx3D.z + 1; k++) {
for (int j = cellIdx3D.y - 1; j <= cellIdx3D.y + 1; j++) {
for (int i = cellIdx3D.x - 1; i <= cellIdx3D.x + 1; i++) {
// Skip cells that fall outside the grid on any axis.
if (i < 0 || j < 0 || k < 0 || i >= gridResolution || j >= gridResolution || k >= gridResolution) {
continue;
}
glm::vec3 neighborCellIdx3D(i, j, k);
neighborCellIdxs[++tracker] = gridIndex3Dto1D(neighborCellIdx3D, gridResolution);
}
}
}
return neighborCellIdxs;
}
// Grid-accelerated velocity update ("scattered" layout): each thread finds
// its boid's cell, scans the 27 surrounding cells, and applies the three
// flocking rules to boids within range, going through the sorted
// particleArrayIndices indirection to reach pos/vel1. The clamped result is
// written to vel2 so vel1 stays a stable snapshot for all threads.
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 finalVel = glm::vec3(0.f), perceivedCenter = finalVel, c = finalVel, perceivedVel = finalVel;
  int rule1Neighbors = 0, rule3Neighbors = 0;
  glm::vec3 boidPos = pos[index];
  glm::vec3 cellIdx3D = glm::floor((boidPos - gridMin) * inverseCellWidth);
  // FIX: dropped the unused local `cellIndex` the original computed here.
  int* neighborCellIdxs = getNeighborCells27(gridResolution, gridMin, cellWidth, cellIdx3D, boidPos,
    gridCellStartIndices, gridCellEndIndices);
  for (int i = 0; i < 27; i++) {
    int cellIdx = neighborCellIdxs[i];
    // -1 marks an out-of-grid slot; a start index of -1 marks an empty cell.
    if (cellIdx == -1 || gridCellStartIndices[cellIdx] == -1) {
      continue;
    }
    for (int j = gridCellStartIndices[cellIdx]; j <= gridCellEndIndices[cellIdx]; j++) {
      int boidIndex = particleArrayIndices[j];
      if (boidIndex == index) {
        continue;  // a boid is not its own neighbor
      }
      glm::vec3 bPos = pos[boidIndex];
      float dist = glm::distance(bPos, boidPos);
      // rule1: cohesion — accumulate neighbor positions for averaging
      if (dist < rule1Distance) {
        rule1Neighbors++;
        perceivedCenter += bPos;
      }
      // separation — push away from too-close neighbors
      if (dist < rule2Distance) {
        c -= bPos - boidPos;
      }
      // alignment — accumulate neighbor velocities for averaging
      if (dist < rule3Distance) {
        rule3Neighbors++;
        perceivedVel += vel1[boidIndex];
      }
    }
  }
  if (rule1Neighbors != 0) {
    perceivedCenter /= rule1Neighbors;
    finalVel += (perceivedCenter - boidPos) * rule1Scale;
  }
  finalVel += c * rule2Scale;
  // alignment
  if (rule3Neighbors != 0) {
    perceivedVel /= rule3Neighbors;
    finalVel += perceivedVel * rule3Scale;
  }
  finalVel += vel1[index];
  // Clamp to the global speed limit before publishing.
  if (glm::length(finalVel) > maxSpeed) {
    finalVel = glm::normalize(finalVel) * maxSpeed;
  }
  vel2[index] = finalVel;
}
// Gather step for the coherent grid: using the cell-sorted
// particleArrayIndices, copy each boid's position into sortPos and its
// velocity from vel1 into vel2, so both arrays end up ordered by grid cell.
__global__ void kernSortPosVel(int N, int* particleArrayIndices, glm::vec3* pos,
  glm::vec3 *sortPos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int src = particleArrayIndices[index];
  sortPos[index] = pos[src];
  vel2[index] = vel1[src];
}
// Grid-accelerated velocity update over cell-coherent data: pos and vel1
// have already been reordered by grid cell (kernSortPosVel), so
// gridCellStartIndices / gridCellEndIndices index them directly — no
// particleArrayIndices indirection. Otherwise identical to the scattered
// version: scan the 27 surrounding cells, apply the three flocking rules,
// clamp, and write the result to vel2.
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  glm::vec3 finalVel = glm::vec3(0.f), perceivedCenter = finalVel, c = finalVel, perceivedVel = finalVel;
  int rule1Neighbors = 0, rule3Neighbors = 0;
  glm::vec3 boidPos = pos[index];
  glm::vec3 cellIdx3D = glm::floor((boidPos - gridMin) * inverseCellWidth);
  // FIX: dropped the unused local `cellIndex` and a dead commented-out line
  // left over from the scattered version.
  int* neighborCellIdxs = getNeighborCells27(gridResolution, gridMin, cellWidth, cellIdx3D, boidPos,
    gridCellStartIndices, gridCellEndIndices);
  for (int i = 0; i < 27; i++) {
    int cellIdx = neighborCellIdxs[i];
    // -1 marks an out-of-grid slot; a start index of -1 marks an empty cell.
    if (cellIdx == -1 || gridCellStartIndices[cellIdx] == -1) {
      continue;
    }
    for (int j = gridCellStartIndices[cellIdx]; j <= gridCellEndIndices[cellIdx]; j++) {
      if (j == index) {
        continue;  // a boid is not its own neighbor
      }
      glm::vec3 bPos = pos[j];
      float dist = glm::distance(bPos, boidPos);
      // rule1: cohesion — accumulate neighbor positions for averaging
      if (dist < rule1Distance) {
        rule1Neighbors++;
        perceivedCenter += bPos;
      }
      // separation — push away from too-close neighbors
      if (dist < rule2Distance) {
        c -= bPos - boidPos;
      }
      // alignment — accumulate neighbor velocities for averaging
      if (dist < rule3Distance) {
        rule3Neighbors++;
        perceivedVel += vel1[j];
      }
    }
  }
  if (rule1Neighbors != 0) {
    perceivedCenter /= rule1Neighbors;
    finalVel += (perceivedCenter - boidPos) * rule1Scale;
  }
  finalVel += c * rule2Scale;
  // alignment
  if (rule3Neighbors != 0) {
    perceivedVel /= rule3Neighbors;
    finalVel += perceivedVel * rule3Scale;
  }
  finalVel += vel1[index];
  // Clamp to the global speed limit before publishing.
  if (glm::length(finalVel) > maxSpeed) {
    finalVel = glm::normalize(finalVel) * maxSpeed;
  }
  vel2[index] = finalVel;
}
/**
 * Step the entire N-body simulation by `dt` seconds (brute-force O(N^2)).
 * Reads dev_vel1, writes dev_vel2, then publishes dev_vel2 back into dev_vel1.
 */
void Boids::stepSimulationNaive(float dt) {
  // One thread per boid; ceil-divide so a partial final block covers the tail.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // New velocities go to dev_vel2 so every thread reads a stable dev_vel1
  // snapshot (no read/write race between neighboring boids).
  kernUpdateVelocityBruteForce <<< fullBlocksPerGrid, blockSize >>> (numObjects,
    dev_pos, dev_vel1, dev_vel2);
  // FIX: launches were previously unchecked here, unlike the grid-based steps.
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // ping-pong: make the new velocities the next step's input. (Swapping the
  // two pointers would avoid this device-to-device copy entirely.)
  cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// One step of the uniform-grid ("scattered") simulation:
// 1) label each boid with its grid-cell index, 2) sort boid indices by cell,
// 3) find each cell's [start, end] run, 4) grid-based velocity update,
// 5) integrate positions, 6) publish new velocities for the next step.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 fullBlocksPerGridCells((gridCellCount + blockSize - 1) / blockSize);
// Label each boid with its cell index and its own array index.
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// Sort boid array indices by cell so each cell's boids are contiguous.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices);
checkCUDAErrorWithLine("thrust sort failed!");
// -1 marks a cell with no boids. NOTE(review): only the start buffer is
// reset to -1 each step; the end buffer is never reset — confirm readers
// always gate on start == -1 first.
kernResetIntBuffer << <fullBlocksPerGridCells, blockSize >> > (gridCellCount,
dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// Velocity update reads dev_vel1 and writes dev_vel2 (ping-pong).
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount,
gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// ping-pong: make the new velocities the next step's input.
cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // One step of the cell-coherent uniform-grid simulation. Same pipeline as
  // the scattered version, plus a gather (kernSortPosVel) that reorders
  // pos/vel into cell order so the neighbor search reads memory coherently.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGridCells((gridCellCount + blockSize - 1) / blockSize);
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
    gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects,
    dev_thrust_particleArrayIndices);
  checkCUDAErrorWithLine("thrust sort failed!");
  kernResetIntBuffer << <fullBlocksPerGridCells, blockSize >> > (gridCellCount,
    dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Gather: dev_sorted_pos / dev_vel2 receive pos / vel1 in cell order.
  kernSortPosVel << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos,
    dev_sorted_pos, dev_vel1, dev_vel2);
  // FIX: this launch was previously unchecked, unlike every other one here.
  checkCUDAErrorWithLine("kernSortPosVel failed!");
  // Reads sorted velocities (dev_vel2), writes new velocities into dev_vel1.
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_sorted_pos, dev_vel2, dev_vel1);
  // FIX: error message previously said "Scattered" (copy-paste slip).
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_sorted_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  // Publish: new velocities into both ping-pong buffers, and the sorted,
  // integrated positions back into the canonical dev_pos.
  cudaMemcpy(dev_vel2, dev_vel1, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
  cudaMemcpy(dev_pos, dev_sorted_pos, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
// Release every device-side buffer allocated for the simulation.
void Boids::endSimulation() {
  // Core particle state.
  cudaFree(dev_pos);
  cudaFree(dev_vel1);
  cudaFree(dev_vel2);
  // Uniform-grid bookkeeping buffers (TODO-2.1 / TODO-2.3).
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_gridCellEndIndices);
  cudaFree(dev_sorted_pos);
}
// LOOK-1.2 Sanity check for thrust::sort_by_key on device data.
// Builds a small key/value fixture on the host (with duplicate keys, to
// exercise the "unstable" part of the sort), sorts it on the GPU by key,
// and prints the arrays before and after for visual inspection.
void Boids::unitTest() {
  const int N = 10;
  std::unique_ptr<int[]>intKeys{ new int[N] };
  std::unique_ptr<int[]>intValues{ new int[N] };
  // Values are simply 0..9 so the post-sort permutation is easy to read.
  const int initialKeys[10] = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
  for (int idx = 0; idx < N; idx++) {
    intKeys[idx] = initialKeys[idx];
    intValues[idx] = idx;
  }
  int *dev_intKeys;
  int *dev_intValues;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int idx = 0; idx < N; idx++) {
    std::cout << " key: " << intKeys[idx];
    std::cout << " value: " << intValues[idx] << std::endl;
  }
  // Push the fixture to the GPU.
  cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
  // Wrap the raw device pointers so thrust can treat them as iterators.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 This mirrors how the grid simulation uses thrust::sort_by_key.
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // Pull the sorted data back for printing.
  cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int idx = 0; idx < N; idx++) {
    std::cout << " key: " << intKeys[idx];
    std::cout << " value: " << intValues[idx] << std::endl;
  }
  // Release the scratch device buffers.
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
  return;
}
|
f6f41379e3287388b3b29042685ef1836ebb43a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Writes a grayscale image from `src` into a 4-byte-per-pixel surface.
// One thread per pixel; `pitch` is the surface row stride in bytes.
// Each source value is clamped to [0, 253]; a pixel whose alpha byte is
// >= `th` receives the gray value, otherwise it receives the bytes packed
// in `pixValue` (channel i takes bits i*8..i*8+7). Bit i of `Mask` enables
// writing channel i (channels 0..2); the alpha byte (pixel1[3]) is read as
// the threshold but never written.
__global__ void FillTexTh(void *surface, int width, int height, size_t pitch, double* src, int Mask, int th, int pixValue)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel1;
// Guard: the launch grid may overhang the image on the right/bottom edges.
if (x >= width || y >= height) return;
double w = src[x + width*y];
// Clamp to the displayable range (cap of 253, not 255 -- intent unclear, TODO confirm).
if (w<0) {w=0;}
if (w>253) {w=253;}
// Byte address of this pixel: row base + 4 bytes per pixel.
pixel1 = (unsigned char *)( (char*)surface + y*pitch) + 4*x;
if (pixel1[3]>=th)
for (int i=0;i<3;i++)
{ if (Mask & (1<<i)) pixel1[i] = w; }
else
for (int i=0;i<3;i++)
{ if (Mask & (1<<i)) pixel1[i] = pixValue >> (i*8); }
} | f6f41379e3287388b3b29042685ef1836ebb43a7.cu | #include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Writes a grayscale image from `src` into a 4-byte-per-pixel surface.
// One thread per pixel; `pitch` is the surface row stride in bytes.
// Each source value is clamped to [0, 253]; a pixel whose alpha byte is
// >= `th` receives the gray value, otherwise it receives the bytes packed
// in `pixValue` (channel i takes bits i*8..i*8+7). Bit i of `Mask` enables
// writing channel i (channels 0..2); the alpha byte (pixel1[3]) is read as
// the threshold but never written.
__global__ void FillTexTh(void *surface, int width, int height, size_t pitch, double* src, int Mask, int th, int pixValue)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel1;
// Guard: the launch grid may overhang the image on the right/bottom edges.
if (x >= width || y >= height) return;
double w = src[x + width*y];
// Clamp to the displayable range (cap of 253, not 255 -- intent unclear, TODO confirm).
if (w<0) {w=0;}
if (w>253) {w=253;}
// Byte address of this pixel: row base + 4 bytes per pixel.
pixel1 = (unsigned char *)( (char*)surface + y*pitch) + 4*x;
if (pixel1[3]>=th)
for (int i=0;i<3;i++)
{ if (Mask & (1<<i)) pixel1[i] = w; }
else
for (int i=0;i<3;i++)
{ if (Mask & (1<<i)) pixel1[i] = pixValue >> (i*8); }
} |
c3762408d1cf4b71251c33f272aadfebcdf71308.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <df/optimization/nonrigidIcp.h>
#include <df/camera/poly3.h> // TODO
#include <df/camera/linear.h> // TODO
#include <df/util/dualQuaternion.h> // TODO
#include <sophus/se3.hpp>
#include <df/util/cudaHelpers.h>
#include <df/util/debugHelpers.h>
namespace df {
//template <typename Scalar, template <typename, int...> class TransformT,
// typename Derived, typename std::enable_if<Eigen::internal::traits<Derived>::RowsAtCompileTime == 3 &&
// Eigen::internal::traits<Derived>::ColsAtCompileTime == 1 &&
// std::is_same<typename Eigen::internal::traits<Derived>::Scalar, Scalar>::value, int>::type = 0>
//__host__ __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> rotate(const TransformT<Scalar> & transform, const Eigen::MatrixBase<Derived> & vector);
// Rotates a 3-vector by the rotation (SO(3)) part of an SE(3) transform,
// ignoring the translation -- suitable for transforming normals/directions.
// SFINAE restricts Derived to 3x1 Eigen expressions whose scalar type matches Scalar.
template <typename Scalar,
typename Derived, typename std::enable_if<Eigen::internal::traits<Derived>::RowsAtCompileTime == 3 &&
Eigen::internal::traits<Derived>::ColsAtCompileTime == 1 &&
std::is_same<typename Eigen::internal::traits<Derived>::Scalar, Scalar>::value, int>::type = 0>
__host__ __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> rotate(const Sophus::SE3Group<Scalar> & transform, const Eigen::MatrixBase<Derived> & vector) {
return transform.so3() * vector;
}
// Rotates a 3-vector by the rotational part of a dual-quaternion transform
// (overload of rotate() so callers can use either transform representation).
// SFINAE restricts Derived to 3x1 Eigen expressions whose scalar type matches Scalar.
template <typename Scalar,
typename Derived, typename std::enable_if<Eigen::internal::traits<Derived>::RowsAtCompileTime == 3 &&
Eigen::internal::traits<Derived>::ColsAtCompileTime == 1 &&
std::is_same<typename Eigen::internal::traits<Derived>::Scalar, Scalar>::value, int>::type = 0>
__host__ __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> rotate(const DualQuaternion<Scalar> & transform, const Eigen::MatrixBase<Derived> & vector) {
return transform.rotate(vector);
}
// Accumulates the Gauss-Newton normal equations for the point-to-plane data
// term of nonrigid ICP. One thread per prediction-map pixel: a valid pixel's
// predicted canonical vertex is associated with a live vertex by projective
// data association, and its residual is distributed over (up to) K nearby
// deformation-graph nodes with Gaussian blending weights. Per-node results
// are accumulated with atomics into `diagonalJTJBlocks` (6x6 upper-triangular
// JTJ blocks), `JTr` (6 entries per node), and `associationCounts`.
// `DebugArgTs` can carry an optional debug image; rejected pixels are shaded
// with a color encoding the rejection reason.
template <typename Scalar, typename CameraModelT, int K, template <typename, int...> class TransformT, typename ... DebugArgTs>
__global__ void computeDataNormalEquationsKernel(const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > liveVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > predictedWarpedVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > predictedWarpedNormals,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > predictedCanonicalVertices,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > predictedCanonicalNormals,
const CameraModelT cameraModel,
const Sophus::SE3Group<Scalar> updatePredictionToLive,
const VoxelGrid<Scalar,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> nearestNeighborGrid,
const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > deformationGraphVertices,
const DeviceTensor1<TransformT<Scalar> > deformationGraphTransforms,
const Scalar oneOverBlendingSigmaSquared,
DeviceTensor1<internal::UpperTriangularMatrix<Scalar,6> > diagonalJTJBlocks,
DeviceTensor1<Scalar> JTr,
DeviceTensor1<int> associationCounts,
DebugArgTs ... debugArgs) {
// TODO: add normal disagreement check
typedef Eigen::Matrix<Scalar,4,1,Eigen::DontAlign> Vec4;
typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
typedef Eigen::Matrix<int,3,1> Vec3i;
typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2;
typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
typedef TransformT<Scalar> Transform;
static constexpr int border = 2; // TODO
static constexpr Scalar rayNormDotThreshold = Scalar(0.1); // TODO
static constexpr Scalar maxAssiciationDistance3D = Scalar(0.02); // TODO
const uint x = threadIdx.x + blockDim.x * blockIdx.x;
const uint y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < predictedCanonicalVertices.dimensionSize(0) && y < predictedCanonicalVertices.dimensionSize(1)) {
//PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,0,0,0),debugArgs...);
const Vec4 & canonicalVertexInGridCoords = predictedCanonicalVertices(x,y);
// ensure value is valid
if (!isfinite(canonicalVertexInGridCoords(0))) { // TODO: use + or - inf so we can use isinf instead (which doesn't need to check against NaN)
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,255,0,255),debugArgs...);
return;
}
const Vec3i nearestNeighborCanonicalVoxel = round(canonicalVertexInGridCoords.template head<3>());
// make sure the canonical vertice is in bounds
if (!nearestNeighborGrid.grid().inBounds(nearestNeighborCanonicalVoxel,0)) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,0,255,255),debugArgs...);
return;
}
// Bring the prediction into the live frame with the current rigid update.
const Vec3 predictedWarpedVertex = updatePredictionToLive * predictedWarpedVertices(x,y).template head<3>();
const Vec3 predictedWarpedNormal = updatePredictionToLive.so3() * predictedWarpedNormals(x,y).template head<3>();
const Vec2 projectedWarpedVertex = cameraModel.project(predictedWarpedVertex);
Eigen::Vector2i nearestDiscretePixel = round(projectedWarpedVertex);
// ensure it projects in bounds
if (!liveVertices.inBounds(nearestDiscretePixel,border)) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,0,0,255),debugArgs...);
return;
}
// TODO: eigen vector accessor
const Vec3 & liveVertex = liveVertices(nearestDiscretePixel);
// make sure the point projects on valid depth
if (liveVertex(2) <= 0 ) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,255,0,255),debugArgs...);
return;
}
// ensure the predicted and live points are close enough
if ( (liveVertex - predictedWarpedVertex).norm() > maxAssiciationDistance3D ) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,255,255,255),debugArgs...);
return;
}
const Vec3 ray = predictedWarpedVertex.normalized();
// make sure the view is not too oblique
if ( -ray.dot(predictedWarpedNormal) < rayNormDotThreshold ) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,0,255,255),debugArgs...);
return;
}
const NNVec & nearestNeighborIndices = nearestNeighborGrid(nearestNeighborCanonicalVoxel);
const Vec3 canonicalVertexInWorldCoords = nearestNeighborGrid.gridToWorld(canonicalVertexInGridCoords.template head<3>());
const Vec4 & canonicalNormal = predictedCanonicalNormals(x,y);
// Sum of squared point-to-plane residuals over the neighbors (debug shading only).
Scalar totalResidual(0);
// TODO: unroll?
// Distribute this pixel's constraint over the K nearest graph nodes;
// a negative index terminates the (possibly short) neighbor list.
for (int k = 0; k < K; ++k) {
const int neighborIndex = nearestNeighborIndices(k);
if (neighborIndex < 0) {
break;
}
const Vec3 & deformationGraphVertex = deformationGraphVertices(neighborIndex);
const Scalar distanceSquared = (deformationGraphVertex - canonicalVertexInWorldCoords).squaredNorm();
const Scalar weight = expX(-distanceSquared*oneOverBlendingSigmaSquared);
const Scalar weightSquared = weight * weight;
const Transform & deformationGraphTransform = deformationGraphTransforms(neighborIndex);
const Vec3 canonicalVertexOffsetFromNeighbor = deformationGraphTransform*(canonicalVertexInWorldCoords - deformationGraphVertex);
const Vec3 canonicalVertexWarpedByNeighbor = deformationGraphVertex + canonicalVertexOffsetFromNeighbor;
const Vec3 canonicalNormalWarpedByNeighbor = rotate(deformationGraphTransform, canonicalNormal.template head<3>());
// if (k == 0) {
// printf("%f,%f,%f vs %f, %f, %f\n", deformationGraphVertex(0),deformationGraphVertex(1),deformationGraphVertex(2),
// canonicalVertexInWorldCoords(0),canonicalVertexInWorldCoords(1),canonicalVertexInWorldCoords(2));
// }
// TODO: apply update?
const Vec3 residual = canonicalVertexWarpedByNeighbor - liveVertex;
const Scalar pointPlaneResidual = canonicalNormalWarpedByNeighbor.transpose()*residual;
totalResidual += pointPlaneResidual*pointPlaneResidual;
// Jacobian of the warped offset w.r.t. this node's 6-DoF update:
// identity block for translation, cross-product terms for rotation
// (small-angle; sign convention TODO confirm against the update rule).
Eigen::Matrix<Scalar,3,6> dOffset_dNeighborTransformUpdate;
dOffset_dNeighborTransformUpdate << 1, 0, 0, 0, canonicalVertexOffsetFromNeighbor(2), -canonicalVertexOffsetFromNeighbor(1),
0, 1, 0, -canonicalVertexOffsetFromNeighbor(2), 0, canonicalVertexOffsetFromNeighbor(0),
0, 0, 1, canonicalVertexOffsetFromNeighbor(1), -canonicalVertexOffsetFromNeighbor(0), 0;
const Eigen::Matrix<Scalar,1,6> dError_dNeighborTransformUpdate = canonicalNormalWarpedByNeighbor.template cast<Scalar>().transpose()*dOffset_dNeighborTransformUpdate;
const internal::UpperTriangularMatrix<Scalar,6> localNeighborJTJBlock = internal::JTJInitializer<Scalar,1,6>::upperTriangularJTJ(weight * dError_dNeighborTransformUpdate);
// Many pixels hit the same node, so the global accumulators are atomic.
internal::UpperTriangularMatrix<Scalar,6> & globalNeighborJTJBlock = diagonalJTJBlocks(neighborIndex);
internal::JTJAtomicAdder<Scalar,6>::atomicAdd(globalNeighborJTJBlock,localNeighborJTJBlock);
internal::VectorAtomicAdder<Scalar,6>::atomicAdd(JTr.data() + 6 * neighborIndex, weightSquared * pointPlaneResidual * dError_dNeighborTransformUpdate);
atomicAdd(&associationCounts(neighborIndex),1);
// if (neighborIndex == 98 && x == 276 && y == 232) {
//// printf("%d,%d -> %f %f %f\n",x,y,canonicalVertexOffsetFromNeighbor(0),canonicalVertexOffsetFromNeighbor(1),canonicalVertexOffsetFromNeighbor(2));
// printf("%d,%d -> %f\n",x,y,localNeighborJTJBlock.head(0));
// printf("warped vertex: %f,%f,%f\n", canonicalVertexWarpedByNeighbor(0), canonicalVertexWarpedByNeighbor(1), canonicalVertexWarpedByNeighbor(2));
// printf("predicted normal: %f,%f,%f\n", predictedWarpedNormal(0), predictedWarpedNormal(1), predictedWarpedNormal(2));
//// printf("warped normal: %f,%f,%f\n", canonicalNormalWarpedByNeighbor(0), canonicalNormalWarpedByNeighbor(1), canonicalNormalWarpedByNeighbor(2));
// }
}
// printf("%f,%f,%f -> %f, %f, %f\n", canonicalVertexInGridCoords(0),canonicalVertexInGridCoords(1),canonicalVertexInGridCoords(2),
// canonicalVertexInWorldCoords(0),canonicalVertexInWorldCoords(1),canonicalVertexInWorldCoords(2));
// Debug shading: residual mapped to gray, saturating at (0.01)^2.
const uchar gray = min(Scalar(255),255 * totalResidual / (0.01*0.01) );
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(gray,gray,gray,255),debugArgs...);
// printf("%f\n",totalResidual);
}
}
namespace internal {
// Host-side driver for the data-term normal equations. Uploads the base-level
// deformation-graph nodes and transforms, zeroes the per-node accumulators,
// launches computeDataNormalEquationsKernel over the prediction maps, and
// converts the results into Eigen triplets (block-diagonal JTJ) and a dense
// JTr vector in the optimization scalar type ScalarOpt. Nodes supported by
// fewer than `minimumAssociationCount` pixel associations are skipped.
// NOTE(review): `depthRange` is accepted but not used in this body.
template <typename Scalar, typename ScalarOpt, typename CameraModelT,
template <typename,int...> class TransformT, int K,
internal::TransformUpdateMethod U, typename ... DebugArgTs>
void computeDataNormalEquations(const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & liveVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & predictedWarpedVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & predictedWarpedNormals,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > & predictedCanonicalVertices,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > & predictedCanonicalNormals,
const CameraModelT & cameraModel,
NonrigidTransformer<Scalar,TransformT> & transformer,
const Sophus::SE3Group<Scalar> & updatePredictionToLive,
const Eigen::Matrix<Scalar,2,1> & depthRange,
std::vector<Eigen::Triplet<ScalarOpt> > & JTJTriplets,
Eigen::Matrix<ScalarOpt,Eigen::Dynamic,1> & JTr,
DebugArgTs ... debugArgs) {
typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
typedef Eigen::Triplet<ScalarOpt> Triplet;
static constexpr int ModelDim = 6;
static constexpr int minimumAssociationCount = 30; // TODO
const uint predictionWidth = predictedCanonicalVertices.dimensionSize(0);
const uint predictionHeight = predictedCanonicalVertices.dimensionSize(1);
assert(predictedWarpedVertices.dimensionSize(0) == predictionWidth);
assert(predictedWarpedVertices.dimensionSize(1) == predictionHeight);
assert(predictedWarpedNormals.dimensionSize(0) == predictionWidth);
assert(predictedWarpedNormals.dimensionSize(1) == predictionHeight);
const dim3 block(32,32,1); //TODO
const dim3 grid(intDivideAndCeil(predictionWidth,block.x),intDivideAndCeil(predictionHeight,block.y),1);
const int numBaseLevelVertices = transformer.numVerticesAtLevel(0);
// TODO: transformer already stores this
// Device copies of the base-level graph nodes and their current transforms.
ManagedDeviceTensor1<Vec3> baseLevelDeformationGraphVertices(numBaseLevelVertices);
baseLevelDeformationGraphVertices.copyFrom(ConstHostTensor1<Vec3>(numBaseLevelVertices,transformer.deformationGraphVertices(0)));
ManagedDeviceTensor1<TransformT<Scalar> > baseLevelDeformationGraphTransforms(numBaseLevelVertices);
baseLevelDeformationGraphTransforms.copyFrom(ConstHostTensor1<TransformT<Scalar> >(numBaseLevelVertices,transformer.transforms(0)));
// Zero the per-node accumulators before the kernel adds into them atomically.
ManagedDeviceTensor1<internal::UpperTriangularMatrix<Scalar,ModelDim> > diagonalJTJBlocks(numBaseLevelVertices);
hipMemset(diagonalJTJBlocks.data(),0,diagonalJTJBlocks.dimensionSize(0)*sizeof(internal::UpperTriangularMatrix<Scalar,ModelDim>));
ManagedDeviceTensor1<Scalar> deviceJTr(numBaseLevelVertices * ModelDim);
hipMemset(deviceJTr.data(),0,deviceJTr.dimensionSize(0)*sizeof(Scalar));
ManagedDeviceTensor1<int> deviceAssociationCounts(numBaseLevelVertices);
hipMemset(deviceAssociationCounts.data(),0,deviceAssociationCounts.dimensionSize(0)*sizeof(int));
const Scalar blendingSigma = transformer.blendingSigma();
hipLaunchKernelGGL(( computeDataNormalEquationsKernel), dim3(grid),dim3(block), 0, 0, liveVertices,
predictedWarpedVertices,
predictedWarpedNormals,
predictedCanonicalVertices,
predictedCanonicalNormals,
cameraModel,
updatePredictionToLive,
transformer.nearestNeighborGrid(),
baseLevelDeformationGraphVertices,
baseLevelDeformationGraphTransforms,
Scalar(1)/(blendingSigma*blendingSigma),
diagonalJTJBlocks,
deviceJTr,
deviceAssociationCounts,
debugArgs...);
hipDeviceSynchronize();
CheckCudaDieOnError();
// Bring JTr back to the host and widen to the optimization scalar type.
Eigen::Matrix<Scalar,Eigen::Dynamic,1> hostJTr(numBaseLevelVertices * ModelDim);
HostTensor1<Scalar>(numBaseLevelVertices * ModelDim, hostJTr.data()).copyFrom(deviceJTr);
JTr = hostJTr.template cast<ScalarOpt>();
ManagedHostTensor1<int> hostAssociationCounts(numBaseLevelVertices);
hostAssociationCounts.copyFrom(deviceAssociationCounts);
ManagedHostTensor1<internal::UpperTriangularMatrix<Scalar,ModelDim> > hostDiagonalJTJBlocks(diagonalJTJBlocks.dimensions());
hostDiagonalJTJBlocks.copyFrom(diagonalJTJBlocks);
JTJTriplets.reserve(ModelDim*ModelDim*numBaseLevelVertices);
// Emit each node's 6x6 block as sparse triplets (both halves of the
// symmetric matrix), skipping weakly-supported nodes.
for (int index = 0; index < numBaseLevelVertices; ++index) {
// make sure we have enough points to support an update to this node
if (hostAssociationCounts(index) > minimumAssociationCount) {
internal::UpperTriangularMatrix<Scalar,ModelDim> & upperTriangle = hostDiagonalJTJBlocks(index);
const Eigen::Matrix<Scalar,ModelDim,ModelDim> squareMatrix = internal::SquareMatrixReconstructor<Scalar,ModelDim>::reconstruct(upperTriangle);
// std::cout << "block " << index << ": " << std::endl;
// std::cout << squareMatrix << std::endl << std::endl;
for (int r = 0; r < ModelDim; ++r) {
for (int c = r; c < ModelDim; ++c) {
const Scalar & val = squareMatrix(r,c);
if ( val != Scalar(0) ) {
JTJTriplets.push_back(Triplet(index * ModelDim + r, index * ModelDim + c, val));
// TODO
if ( r != c) {
JTJTriplets.push_back(Triplet(index * ModelDim + c, index * ModelDim + r, val));
}
}
}
}
}
}
}
// Explicit-instantiation helper: stamps out computeDataNormalEquations for a
// given (scalar, optimization scalar, camera model, transform, K, update rule)
// combination, once without and once with a debug-image argument.
// (Comments cannot go inside the macro -- `//` would break the continuations.)
#define COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(type,type_opt,camera,transform,K,update) \
template void computeDataNormalEquations<type,type_opt,camera##CameraModel<type>,transform,K,internal::TransformUpdate##update##Multiply>( \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const camera##CameraModel<type> &, \
NonrigidTransformer<type,transform> &, \
const Sophus::SE3Group<type> &, \
const Eigen::Matrix<type,2,1> &, \
std::vector<Eigen::Triplet<type_opt> > &, \
Eigen::Matrix<type_opt,Eigen::Dynamic,1> &); \
\
template void computeDataNormalEquations<type,type_opt,camera##CameraModel<type>,transform,K,internal::TransformUpdate##update##Multiply, DeviceTensor2<Eigen::UnalignedVec4<uchar> > >( \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const camera##CameraModel<type> &, \
NonrigidTransformer<type,transform> &, \
const Sophus::SE3Group<type> &, \
const Eigen::Matrix<type,2,1> &, \
std::vector<Eigen::Triplet<type_opt> > &, \
Eigen::Matrix<type_opt,Eigen::Dynamic,1> &, \
DeviceTensor2<Eigen::UnalignedVec4<uchar> > )
// All supported combinations: {Poly3, Linear} camera x {DualQuaternion, SE3}
// transform x {Left, Right} update, with float data and double optimization.
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,DualQuaternion,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,DualQuaternion,4,Right);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,Sophus::SE3Group,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,Sophus::SE3Group,4,Right);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,DualQuaternion,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,DualQuaternion,4,Right);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,Sophus::SE3Group,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,Sophus::SE3Group,4,Right);
} // namespace internal
} // namespace df
| c3762408d1cf4b71251c33f272aadfebcdf71308.cu | #include <df/optimization/nonrigidIcp.h>
#include <df/camera/poly3.h> // TODO
#include <df/camera/linear.h> // TODO
#include <df/util/dualQuaternion.h> // TODO
#include <sophus/se3.hpp>
#include <df/util/cudaHelpers.h>
#include <df/util/debugHelpers.h>
namespace df {
//template <typename Scalar, template <typename, int...> class TransformT,
// typename Derived, typename std::enable_if<Eigen::internal::traits<Derived>::RowsAtCompileTime == 3 &&
// Eigen::internal::traits<Derived>::ColsAtCompileTime == 1 &&
// std::is_same<typename Eigen::internal::traits<Derived>::Scalar, Scalar>::value, int>::type = 0>
//__host__ __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> rotate(const TransformT<Scalar> & transform, const Eigen::MatrixBase<Derived> & vector);
// Rotates a 3-vector by the rotation (SO(3)) part of an SE(3) transform,
// ignoring the translation -- suitable for transforming normals/directions.
// SFINAE restricts Derived to 3x1 Eigen expressions whose scalar type matches Scalar.
template <typename Scalar,
typename Derived, typename std::enable_if<Eigen::internal::traits<Derived>::RowsAtCompileTime == 3 &&
Eigen::internal::traits<Derived>::ColsAtCompileTime == 1 &&
std::is_same<typename Eigen::internal::traits<Derived>::Scalar, Scalar>::value, int>::type = 0>
__host__ __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> rotate(const Sophus::SE3Group<Scalar> & transform, const Eigen::MatrixBase<Derived> & vector) {
return transform.so3() * vector;
}
// Rotates a 3-vector by the rotational part of a dual-quaternion transform
// (overload of rotate() so callers can use either transform representation).
// SFINAE restricts Derived to 3x1 Eigen expressions whose scalar type matches Scalar.
template <typename Scalar,
typename Derived, typename std::enable_if<Eigen::internal::traits<Derived>::RowsAtCompileTime == 3 &&
Eigen::internal::traits<Derived>::ColsAtCompileTime == 1 &&
std::is_same<typename Eigen::internal::traits<Derived>::Scalar, Scalar>::value, int>::type = 0>
__host__ __device__ Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> rotate(const DualQuaternion<Scalar> & transform, const Eigen::MatrixBase<Derived> & vector) {
return transform.rotate(vector);
}
template <typename Scalar, typename CameraModelT, int K, template <typename, int...> class TransformT, typename ... DebugArgTs>
__global__ void computeDataNormalEquationsKernel(const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > liveVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > predictedWarpedVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > predictedWarpedNormals,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > predictedCanonicalVertices,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > predictedCanonicalNormals,
const CameraModelT cameraModel,
const Sophus::SE3Group<Scalar> updatePredictionToLive,
const VoxelGrid<Scalar,Eigen::Matrix<int,K,1,Eigen::DontAlign>,DeviceResident> nearestNeighborGrid,
const DeviceTensor1<Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> > deformationGraphVertices,
const DeviceTensor1<TransformT<Scalar> > deformationGraphTransforms,
const Scalar oneOverBlendingSigmaSquared,
DeviceTensor1<internal::UpperTriangularMatrix<Scalar,6> > diagonalJTJBlocks,
DeviceTensor1<Scalar> JTr,
DeviceTensor1<int> associationCounts,
DebugArgTs ... debugArgs) {
// TODO: add normal disagreement check
typedef Eigen::Matrix<Scalar,4,1,Eigen::DontAlign> Vec4;
typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
typedef Eigen::Matrix<int,3,1> Vec3i;
typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2;
typedef Eigen::Matrix<int,K,1,Eigen::DontAlign> NNVec;
typedef TransformT<Scalar> Transform;
static constexpr int border = 2; // TODO
static constexpr Scalar rayNormDotThreshold = Scalar(0.1); // TODO
static constexpr Scalar maxAssiciationDistance3D = Scalar(0.02); // TODO
const uint x = threadIdx.x + blockDim.x * blockIdx.x;
const uint y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < predictedCanonicalVertices.dimensionSize(0) && y < predictedCanonicalVertices.dimensionSize(1)) {
//PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,0,0,0),debugArgs...);
const Vec4 & canonicalVertexInGridCoords = predictedCanonicalVertices(x,y);
// ensure value is valid
if (!isfinite(canonicalVertexInGridCoords(0))) { // TODO: use + or - inf so we can use isinf instead (which doesn't need to check against NaN)
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,255,0,255),debugArgs...);
return;
}
const Vec3i nearestNeighborCanonicalVoxel = round(canonicalVertexInGridCoords.template head<3>());
// make sure the canonical vertice is in bounds
if (!nearestNeighborGrid.grid().inBounds(nearestNeighborCanonicalVoxel,0)) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,0,255,255),debugArgs...);
return;
}
const Vec3 predictedWarpedVertex = updatePredictionToLive * predictedWarpedVertices(x,y).template head<3>();
const Vec3 predictedWarpedNormal = updatePredictionToLive.so3() * predictedWarpedNormals(x,y).template head<3>();
const Vec2 projectedWarpedVertex = cameraModel.project(predictedWarpedVertex);
Eigen::Vector2i nearestDiscretePixel = round(projectedWarpedVertex);
// ensure it projects in bounds
if (!liveVertices.inBounds(nearestDiscretePixel,border)) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(255,0,0,255),debugArgs...);
return;
}
// TODO: eigen vector accessor
const Vec3 & liveVertex = liveVertices(nearestDiscretePixel);
// make sure the point projects on valid depth
if (liveVertex(2) <= 0 ) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,255,0,255),debugArgs...);
return;
}
// ensure the predicted and live points are close enough
if ( (liveVertex - predictedWarpedVertex).norm() > maxAssiciationDistance3D ) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,255,255,255),debugArgs...);
return;
}
const Vec3 ray = predictedWarpedVertex.normalized();
// make sure the view is not too oblique
if ( -ray.dot(predictedWarpedNormal) < rayNormDotThreshold ) {
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(0,0,255,255),debugArgs...);
return;
}
const NNVec & nearestNeighborIndices = nearestNeighborGrid(nearestNeighborCanonicalVoxel);
const Vec3 canonicalVertexInWorldCoords = nearestNeighborGrid.gridToWorld(canonicalVertexInGridCoords.template head<3>());
const Vec4 & canonicalNormal = predictedCanonicalNormals(x,y);
Scalar totalResidual(0);
// TODO: unroll?
for (int k = 0; k < K; ++k) {
const int neighborIndex = nearestNeighborIndices(k);
if (neighborIndex < 0) {
break;
}
const Vec3 & deformationGraphVertex = deformationGraphVertices(neighborIndex);
const Scalar distanceSquared = (deformationGraphVertex - canonicalVertexInWorldCoords).squaredNorm();
const Scalar weight = expX(-distanceSquared*oneOverBlendingSigmaSquared);
const Scalar weightSquared = weight * weight;
const Transform & deformationGraphTransform = deformationGraphTransforms(neighborIndex);
const Vec3 canonicalVertexOffsetFromNeighbor = deformationGraphTransform*(canonicalVertexInWorldCoords - deformationGraphVertex);
const Vec3 canonicalVertexWarpedByNeighbor = deformationGraphVertex + canonicalVertexOffsetFromNeighbor;
const Vec3 canonicalNormalWarpedByNeighbor = rotate(deformationGraphTransform, canonicalNormal.template head<3>());
// if (k == 0) {
// printf("%f,%f,%f vs %f, %f, %f\n", deformationGraphVertex(0),deformationGraphVertex(1),deformationGraphVertex(2),
// canonicalVertexInWorldCoords(0),canonicalVertexInWorldCoords(1),canonicalVertexInWorldCoords(2));
// }
// TODO: apply update?
const Vec3 residual = canonicalVertexWarpedByNeighbor - liveVertex;
const Scalar pointPlaneResidual = canonicalNormalWarpedByNeighbor.transpose()*residual;
totalResidual += pointPlaneResidual*pointPlaneResidual;
Eigen::Matrix<Scalar,3,6> dOffset_dNeighborTransformUpdate;
dOffset_dNeighborTransformUpdate << 1, 0, 0, 0, canonicalVertexOffsetFromNeighbor(2), -canonicalVertexOffsetFromNeighbor(1),
0, 1, 0, -canonicalVertexOffsetFromNeighbor(2), 0, canonicalVertexOffsetFromNeighbor(0),
0, 0, 1, canonicalVertexOffsetFromNeighbor(1), -canonicalVertexOffsetFromNeighbor(0), 0;
const Eigen::Matrix<Scalar,1,6> dError_dNeighborTransformUpdate = canonicalNormalWarpedByNeighbor.template cast<Scalar>().transpose()*dOffset_dNeighborTransformUpdate;
const internal::UpperTriangularMatrix<Scalar,6> localNeighborJTJBlock = internal::JTJInitializer<Scalar,1,6>::upperTriangularJTJ(weight * dError_dNeighborTransformUpdate);
internal::UpperTriangularMatrix<Scalar,6> & globalNeighborJTJBlock = diagonalJTJBlocks(neighborIndex);
internal::JTJAtomicAdder<Scalar,6>::atomicAdd(globalNeighborJTJBlock,localNeighborJTJBlock);
internal::VectorAtomicAdder<Scalar,6>::atomicAdd(JTr.data() + 6 * neighborIndex, weightSquared * pointPlaneResidual * dError_dNeighborTransformUpdate);
atomicAdd(&associationCounts(neighborIndex),1);
// if (neighborIndex == 98 && x == 276 && y == 232) {
//// printf("%d,%d -> %f %f %f\n",x,y,canonicalVertexOffsetFromNeighbor(0),canonicalVertexOffsetFromNeighbor(1),canonicalVertexOffsetFromNeighbor(2));
// printf("%d,%d -> %f\n",x,y,localNeighborJTJBlock.head(0));
// printf("warped vertex: %f,%f,%f\n", canonicalVertexWarpedByNeighbor(0), canonicalVertexWarpedByNeighbor(1), canonicalVertexWarpedByNeighbor(2));
// printf("predicted normal: %f,%f,%f\n", predictedWarpedNormal(0), predictedWarpedNormal(1), predictedWarpedNormal(2));
//// printf("warped normal: %f,%f,%f\n", canonicalNormalWarpedByNeighbor(0), canonicalNormalWarpedByNeighbor(1), canonicalNormalWarpedByNeighbor(2));
// }
}
// printf("%f,%f,%f -> %f, %f, %f\n", canonicalVertexInGridCoords(0),canonicalVertexInGridCoords(1),canonicalVertexInGridCoords(2),
// canonicalVertexInWorldCoords(0),canonicalVertexInWorldCoords(1),canonicalVertexInWorldCoords(2));
const uchar gray = min(Scalar(255),255 * totalResidual / (0.01*0.01) );
PixelDebugger<DebugArgTs...>::debugPixel(Eigen::Vector2i(x,y),Eigen::UnalignedVec4<uchar>(gray,gray,gray,255),debugArgs...);
// printf("%f\n",totalResidual);
}
}
namespace internal {
// Builds the Gauss-Newton normal equations (JTJ triplets and JTr) for the
// data term of a nonrigid point-plane ICP between the predicted model
// geometry and the live depth frame. The GPU kernel accumulates one 6x6
// JTJ diagonal block and a 6-vector JTr segment per base-level deformation
// graph node; this host function uploads the graph state, launches the
// kernel, downloads the accumulators, and converts well-supported node
// blocks into sparse-matrix triplets for the solver.
// NOTE(review): only the block-diagonal of JTJ is produced here — presumably
// off-diagonal (regularization) terms come from elsewhere; confirm.
template <typename Scalar, typename ScalarOpt, typename CameraModelT,
template <typename,int...> class TransformT, int K,
internal::TransformUpdateMethod U, typename ... DebugArgTs>
void computeDataNormalEquations(const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & liveVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & predictedWarpedVertices,
const DeviceTensor2<Eigen::UnalignedVec3<Scalar> > & predictedWarpedNormals,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > & predictedCanonicalVertices,
const DeviceTensor2<Eigen::UnalignedVec4<Scalar> > & predictedCanonicalNormals,
const CameraModelT & cameraModel,
NonrigidTransformer<Scalar,TransformT> & transformer,
const Sophus::SE3Group<Scalar> & updatePredictionToLive,
const Eigen::Matrix<Scalar,2,1> & depthRange,
std::vector<Eigen::Triplet<ScalarOpt> > & JTJTriplets,
Eigen::Matrix<ScalarOpt,Eigen::Dynamic,1> & JTr,
DebugArgTs ... debugArgs) {
typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3;
typedef Eigen::Triplet<ScalarOpt> Triplet;
// 6 parameters per deformation node (3 rotation + 3 translation update).
static constexpr int ModelDim = 6;
// Minimum number of pixel associations a node needs before its block is
// considered well-conditioned enough to enter the system.
static constexpr int minimumAssociationCount = 30; // TODO
// All prediction maps must share the same resolution; the kernel is
// launched over this pixel grid.
const uint predictionWidth = predictedCanonicalVertices.dimensionSize(0);
const uint predictionHeight = predictedCanonicalVertices.dimensionSize(1);
assert(predictedWarpedVertices.dimensionSize(0) == predictionWidth);
assert(predictedWarpedVertices.dimensionSize(1) == predictionHeight);
assert(predictedWarpedNormals.dimensionSize(0) == predictionWidth);
assert(predictedWarpedNormals.dimensionSize(1) == predictionHeight);
const dim3 block(32,32,1); //TODO
const dim3 grid(intDivideAndCeil(predictionWidth,block.x),intDivideAndCeil(predictionHeight,block.y),1);
const int numBaseLevelVertices = transformer.numVerticesAtLevel(0);
// TODO: transformer already stores this
// Upload the base-level graph vertices and their current transforms so the
// kernel can blend warps per pixel.
ManagedDeviceTensor1<Vec3> baseLevelDeformationGraphVertices(numBaseLevelVertices);
baseLevelDeformationGraphVertices.copyFrom(ConstHostTensor1<Vec3>(numBaseLevelVertices,transformer.deformationGraphVertices(0)));
ManagedDeviceTensor1<TransformT<Scalar> > baseLevelDeformationGraphTransforms(numBaseLevelVertices);
baseLevelDeformationGraphTransforms.copyFrom(ConstHostTensor1<TransformT<Scalar> >(numBaseLevelVertices,transformer.transforms(0)));
// Zero-initialize the per-node accumulators the kernel adds into with
// atomics. cudaMemset is byte-wise, which is fine for an all-zero fill.
ManagedDeviceTensor1<internal::UpperTriangularMatrix<Scalar,ModelDim> > diagonalJTJBlocks(numBaseLevelVertices);
cudaMemset(diagonalJTJBlocks.data(),0,diagonalJTJBlocks.dimensionSize(0)*sizeof(internal::UpperTriangularMatrix<Scalar,ModelDim>));
ManagedDeviceTensor1<Scalar> deviceJTr(numBaseLevelVertices * ModelDim);
cudaMemset(deviceJTr.data(),0,deviceJTr.dimensionSize(0)*sizeof(Scalar));
ManagedDeviceTensor1<int> deviceAssociationCounts(numBaseLevelVertices);
cudaMemset(deviceAssociationCounts.data(),0,deviceAssociationCounts.dimensionSize(0)*sizeof(int));
const Scalar blendingSigma = transformer.blendingSigma();
// One thread per prediction pixel accumulates its contribution to every
// one of its K nearest deformation nodes.
computeDataNormalEquationsKernel<<<grid,block>>>(liveVertices,
predictedWarpedVertices,
predictedWarpedNormals,
predictedCanonicalVertices,
predictedCanonicalNormals,
cameraModel,
updatePredictionToLive,
transformer.nearestNeighborGrid(),
baseLevelDeformationGraphVertices,
baseLevelDeformationGraphTransforms,
Scalar(1)/(blendingSigma*blendingSigma),
diagonalJTJBlocks,
deviceJTr,
deviceAssociationCounts,
debugArgs...);
// Block until the kernel finishes and surface any launch/execution error.
cudaDeviceSynchronize();
CheckCudaDieOnError();
// Download JTr and widen it to the optimization scalar type.
Eigen::Matrix<Scalar,Eigen::Dynamic,1> hostJTr(numBaseLevelVertices * ModelDim);
HostTensor1<Scalar>(numBaseLevelVertices * ModelDim, hostJTr.data()).copyFrom(deviceJTr);
JTr = hostJTr.template cast<ScalarOpt>();
ManagedHostTensor1<int> hostAssociationCounts(numBaseLevelVertices);
hostAssociationCounts.copyFrom(deviceAssociationCounts);
ManagedHostTensor1<internal::UpperTriangularMatrix<Scalar,ModelDim> > hostDiagonalJTJBlocks(diagonalJTJBlocks.dimensions());
hostDiagonalJTJBlocks.copyFrom(diagonalJTJBlocks);
JTJTriplets.reserve(ModelDim*ModelDim*numBaseLevelVertices);
// Expand each node's packed upper-triangular block into symmetric sparse
// triplets, skipping nodes with too little observational support.
for (int index = 0; index < numBaseLevelVertices; ++index) {
// make sure we have enough points to support an update to this node
if (hostAssociationCounts(index) > minimumAssociationCount) {
internal::UpperTriangularMatrix<Scalar,ModelDim> & upperTriangle = hostDiagonalJTJBlocks(index);
const Eigen::Matrix<Scalar,ModelDim,ModelDim> squareMatrix = internal::SquareMatrixReconstructor<Scalar,ModelDim>::reconstruct(upperTriangle);
// std::cout << "block " << index << ": " << std::endl;
// std::cout << squareMatrix << std::endl << std::endl;
for (int r = 0; r < ModelDim; ++r) {
for (int c = r; c < ModelDim; ++c) {
const Scalar & val = squareMatrix(r,c);
if ( val != Scalar(0) ) {
JTJTriplets.push_back(Triplet(index * ModelDim + r, index * ModelDim + c, val));
// TODO
// Mirror the entry to make the block symmetric.
if ( r != c) {
JTJTriplets.push_back(Triplet(index * ModelDim + c, index * ModelDim + r, val));
}
}
}
}
}
}
}
// Explicit-instantiation helper: stamps out computeDataNormalEquations for a
// given (data scalar, optimization scalar, camera model, transform blend
// type, K, update side) combination — once without debug arguments and once
// with a debug-image tensor appended.
// NOTE: the backslash-continued lines below must not carry trailing comments.
#define COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(type,type_opt,camera,transform,K,update) \
template void computeDataNormalEquations<type,type_opt,camera##CameraModel<type>,transform,K,internal::TransformUpdate##update##Multiply>( \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const camera##CameraModel<type> &, \
NonrigidTransformer<type,transform> &, \
const Sophus::SE3Group<type> &, \
const Eigen::Matrix<type,2,1> &, \
std::vector<Eigen::Triplet<type_opt> > &, \
Eigen::Matrix<type_opt,Eigen::Dynamic,1> &); \
\
template void computeDataNormalEquations<type,type_opt,camera##CameraModel<type>,transform,K,internal::TransformUpdate##update##Multiply, DeviceTensor2<Eigen::UnalignedVec4<uchar> > >( \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec3<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const DeviceTensor2<Eigen::UnalignedVec4<type> > &, \
const camera##CameraModel<type> &, \
NonrigidTransformer<type,transform> &, \
const Sophus::SE3Group<type> &, \
const Eigen::Matrix<type,2,1> &, \
std::vector<Eigen::Triplet<type_opt> > &, \
Eigen::Matrix<type_opt,Eigen::Dynamic,1> &, \
DeviceTensor2<Eigen::UnalignedVec4<uchar> > )
// Combinations actually compiled into the library: float data with double
// normal equations, Poly3/Linear cameras, dual-quaternion or SE3 blending,
// K = 4 nearest deformation nodes, left- or right-multiplied updates.
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,DualQuaternion,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,DualQuaternion,4,Right);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,Sophus::SE3Group,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Poly3,Sophus::SE3Group,4,Right);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,DualQuaternion,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,DualQuaternion,4,Right);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,Sophus::SE3Group,4,Left);
COMPUTE_DATA_NORMAL_EQUATIONS_EXPLICIT_INSTANTIATION(float,double,Linear,Sophus::SE3Group,4,Right);
} // namespace internal
} // namespace df
|
5c32b65062d5209e51bf8f43eb43db6ddb732f05.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated (hipified) benchmark driver: sweeps 20 launch
// configurations over the first argv[1] matrix sizes and prints the wall
// time of 1000 kernel launches per combination.
// NOTE(review): argv[1] is dereferenced without checking argc — confirm the
// harness always supplies it.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): these allocations are XSIZE*YSIZE *bytes*, not elements —
// 4x too small for float/int buffers if the kernel touches every element.
// Also, none of them is ever freed, leaking across all iterations.
float *max = NULL;
hipMalloc(&max, XSIZE*YSIZE);
float *var = NULL;
hipMalloc(&var, XSIZE*YSIZE);
int *top = NULL;
hipMalloc(&top, XSIZE*YSIZE);
int k = 1;
// Round the problem size up to a multiple of the block dimensions so the
// grid covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// One launch to absorb first-launch overhead, 10 warm-up launches, then
// the timed run of 1000 launches.
hipFree(0);hipLaunchKernelGGL((
kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, max,var,top,k);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, max,var,top,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, max,var,top,k);
}
// NOTE(review): launches are asynchronous and no sync precedes `end`, so
// this mostly measures enqueue time — confirm that is intended.
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5c32b65062d5209e51bf8f43eb43db6ddb732f05.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps 20 launch configurations over the
// first argv[1] matrix sizes and prints the wall time of 1000 launches of
// `kernel` (from kernel.cu) for each combination.
//
// argv[1]: number of entries of matrices_ to benchmark (1..7).
int main(int argc, char **argv) {
cudaSetDevice(0);
// Bug fix: the original dereferenced argv[1] unconditionally.
if (argc < 2) {
    fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
    return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
        int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
        int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
        // Bug fix: the original allocated XSIZE*YSIZE *bytes*, i.e. only a
        // quarter of the elements for float/int buffers; size by element.
        float *max = NULL;
        cudaMalloc(&max, XSIZE * YSIZE * sizeof(float));
        float *var = NULL;
        cudaMalloc(&var, XSIZE * YSIZE * sizeof(float));
        int *top = NULL;
        cudaMalloc(&top, XSIZE * YSIZE * sizeof(int));
        int k = 1;
        // Round the problem size up to a multiple of the block dimensions so
        // the grid covers the whole matrix.
        int iXSIZE = XSIZE;
        int iYSIZE = YSIZE;
        while (iXSIZE % BLOCKX != 0) {
            iXSIZE++;
        }
        while (iYSIZE % BLOCKY != 0) {
            iYSIZE++;
        }
        dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
        dim3 threadBlock(BLOCKX, BLOCKY);
        cudaFree(0); // establish the CUDA context before timing
        // One launch to absorb first-launch overhead, 10 warm-up launches,
        // then the timed run of 1000 launches.
        kernel<<<gridBlock, threadBlock>>>(max, var, top, k);
        cudaDeviceSynchronize();
        for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
            kernel<<<gridBlock, threadBlock>>>(max, var, top, k);
        }
        auto start = steady_clock::now();
        for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
            kernel<<<gridBlock, threadBlock>>>(max, var, top, k);
        }
        // NOTE(review): launches are asynchronous and no sync precedes `end`,
        // so this mostly measures enqueue time — kept as generated.
        auto end = steady_clock::now();
        auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
        cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        // Bug fix: the original leaked every allocation; release the buffers
        // before the next configuration.
        cudaFree(max);
        cudaFree(var);
        cudaFree(top);
    }
}
return 0;
}
da41856389013d26876fae3c1ac5b28de6fab308.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__fillToInds.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated (hipified) benchmark driver for __fillToInds: sweeps 20
// launch configurations over the first argv[1] matrix sizes and prints the
// wall time of 1000 kernel launches per combination.
// NOTE(review): argv[1] is dereferenced without checking argc.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double A = 2;
// NOTE(review): B holds doubles but only XSIZE*YSIZE *bytes* are allocated
// (8x too small if every element is touched); I similarly lacks
// sizeof(int). Neither buffer is ever freed (leaks across iterations).
double *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int *I = NULL;
hipMalloc(&I, XSIZE*YSIZE);
long long len = 1;
// Round the problem size up to a multiple of the block dimensions so the
// grid covers the whole matrix.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// One launch to absorb first-launch overhead, 10 warm-up launches, then
// the timed run of 1000 launches.
hipFree(0);hipLaunchKernelGGL((
__fillToInds), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,I,len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
__fillToInds), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,I,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
__fillToInds), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,I,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | da41856389013d26876fae3c1ac5b28de6fab308.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__fillToInds.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for __fillToInds: sweeps 20 launch
// configurations over the first argv[1] matrix sizes and prints the wall
// time of 1000 kernel launches per combination.
//
// argv[1]: number of entries of matrices_ to benchmark (1..7).
int main(int argc, char **argv) {
cudaSetDevice(0);
// Bug fix: the original dereferenced argv[1] unconditionally.
if (argc < 2) {
    fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
    return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
        int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
        int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
        double A = 2;
        // Bug fix: the original allocated XSIZE*YSIZE *bytes* — 8x too small
        // for the double buffer and 4x too small for the int buffer; size by
        // element.
        double *B = NULL;
        cudaMalloc(&B, XSIZE * YSIZE * sizeof(double));
        int *I = NULL;
        cudaMalloc(&I, XSIZE * YSIZE * sizeof(int));
        long long len = 1;
        // Round the problem size up to a multiple of the block dimensions so
        // the grid covers the whole matrix.
        int iXSIZE = XSIZE;
        int iYSIZE = YSIZE;
        while (iXSIZE % BLOCKX != 0) {
            iXSIZE++;
        }
        while (iYSIZE % BLOCKY != 0) {
            iYSIZE++;
        }
        dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
        dim3 threadBlock(BLOCKX, BLOCKY);
        cudaFree(0); // establish the CUDA context before timing
        // One launch to absorb first-launch overhead, 10 warm-up launches,
        // then the timed run of 1000 launches.
        __fillToInds<<<gridBlock, threadBlock>>>(A, B, I, len);
        cudaDeviceSynchronize();
        for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
            __fillToInds<<<gridBlock, threadBlock>>>(A, B, I, len);
        }
        auto start = steady_clock::now();
        for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
            __fillToInds<<<gridBlock, threadBlock>>>(A, B, I, len);
        }
        // NOTE(review): launches are asynchronous and no sync precedes `end`,
        // so this mostly measures enqueue time — kept as generated.
        auto end = steady_clock::now();
        auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
        cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        // Bug fix: the original leaked both allocations; release them before
        // the next configuration.
        cudaFree(B);
        cudaFree(I);
    }
}
return 0;
}
3bbc1ec79a66425f4b0a02730ffb77a038e15375.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "classify_cuda.cuh"
/*
* Arguments:
* data: Memory that contains both the review LSA coefficients and the labels.
* Format decided by implementation of classify.
* batch_size: Size of mini-batch, how many elements to process at once
* step_size: Step size for gradient descent. Tune this as needed. 1.0 is sane
* default.
* weights: Pointer to weights vector of length REVIEW_DIM.
* errors: Pointer to a single float used to describe the error for the batch.
* An output variable for the kernel. The kernel can either write the
* value of loss function over the batch or the misclassification rate
* in the batch to errors.
*/
/*
 * One mini-batch step of logistic-regression training by gradient descent.
 * Data layout: feature X of review Y lives at data[Y + X*batch_size]; the
 * label is stored at feature index REVIEW_DIM.
 *
 * Bug fix: step_size was declared `int` although cudaClassify passes a
 * `float` learning rate — any rate below 1.0 was silently truncated to 0,
 * freezing the weights. It is now `float` (callers are unaffected).
 */
__global__
void trainLogRegKernel(float *data, int batch_size, float step_size,
float *weights, float *errors,
float *num, float *grad) {
/*
* Reminder to self: accessing data element X for thread Y is
* = data[Y + (X * batch_size)]
*/
// Grid-stride loop: one thread per review in the batch.
unsigned int review = blockIdx.x * blockDim.x + threadIdx.x;
// For each review
while (review < batch_size){
/* Applying the gradient descent */
// These float initializations are for calculating the denominator
float exponent = 0;
float denom = 0;
// Initialize int to keep track of errors
int errorcount = 0;
// Calculating the numerator and denominator
for (int i = 0; i < REVIEW_DIM; i++){
// Calculating the numerator
num[review + (i * batch_size)] = data[review + (i * batch_size)] *
data[review + (REVIEW_DIM * batch_size)];
// Calculating the denominator
exponent += weights[i] * data[review + (i * batch_size)];
}
denom = (1 + exp(data[review + (REVIEW_DIM * batch_size)] *
exponent)) * -batch_size;
// Calculating the gradient
for (int i = 0; i < REVIEW_DIM; i++){
atomicAdd(&grad[review + (i * batch_size)],
num[review + (i * batch_size)] / denom);
}
/* Calculating prediction: a misclassification is a sign mismatch
between the linear score and the label. */
if ((exponent > 0 && data[review + (REVIEW_DIM * batch_size)] < 0)
|| (exponent < 0 && data[review + (REVIEW_DIM * batch_size)] > 0)){
errorcount++;
}
// Sync threads before updating weight
// NOTE(review): this barrier sits inside a loop whose trip count can
// differ between threads of a block when batch_size is not a multiple
// of the launch width; a divergent __syncthreads() is undefined
// behavior — confirm batch_size is always a multiple of the block size.
__syncthreads();
/* Updating the weight */
for (int i = 0; i < REVIEW_DIM; i++){
atomicAdd(&weights[i], -step_size *
grad[review + (i * batch_size)]);
}
// Fold this thread's misclassification rate into the batch error.
atomicAdd(errors, (float) errorcount / batch_size);
review += blockDim.x * gridDim.x;
}
}
/*
* All parameters have the same meaning as in docstring for trainLogRegKernel.
* Notably, cudaClassify returns a float that quantifies the error in the
* minibatch. This error should go down as more training occurs.
*/
/*
 * Runs one training round of trainLogRegKernel over a mini-batch and returns
 * the error value the kernel accumulated (misclassification rate summed over
 * the batch). `grad` and the device error cell are zeroed before the launch;
 * all other parameters are forwarded to the kernel unchanged.
 */
float cudaClassify(float *data, int batch_size,
                   float step_size, float *weights,
                   float *num, float *grad) {
    // Cap the block at 512 threads; smaller batches get exactly one block's
    // worth of threads.
    int threadsPerBlock = 512;
    if (batch_size < 512) {
        threadsPerBlock = batch_size;
    }
    // Ceiling division so every review is covered by some thread.
    int numBlocks = (batch_size + threadsPerBlock - 1) / threadsPerBlock;
    int sharedBytes = 0;
    // Single-float device accumulator for the batch error, zeroed up front.
    float *devErrors;
    hipMalloc(&devErrors, sizeof(float));
    hipMemset(devErrors, 0, sizeof(float));
    // Clear the gradient scratch space (features + label column per review).
    hipMemset(grad, 0, (REVIEW_DIM + 1) * batch_size * sizeof(float));
    hipLaunchKernelGGL(( trainLogRegKernel), dim3(numBlocks), dim3(threadsPerBlock), sharedBytes, 0, data,
                       batch_size,
                       step_size,
                       weights,
                       devErrors,
                       num,
                       grad);
    // Blocking copy also synchronizes with the kernel before we read.
    float hostErrors = -1.0;
    hipMemcpy(&hostErrors, devErrors, sizeof(float), hipMemcpyDeviceToHost);
    hipFree(devErrors);
    return hostErrors;
}
| 3bbc1ec79a66425f4b0a02730ffb77a038e15375.cu | #include <cassert>
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>
#include "classify_cuda.cuh"
/*
* Arguments:
* data: Memory that contains both the review LSA coefficients and the labels.
* Format decided by implementation of classify.
* batch_size: Size of mini-batch, how many elements to process at once
* step_size: Step size for gradient descent. Tune this as needed. 1.0 is sane
* default.
* weights: Pointer to weights vector of length REVIEW_DIM.
* errors: Pointer to a single float used to describe the error for the batch.
* An output variable for the kernel. The kernel can either write the
* value of loss function over the batch or the misclassification rate
* in the batch to errors.
*/
/*
 * One mini-batch step of logistic-regression training by gradient descent.
 * Data layout: feature X of review Y lives at data[Y + X*batch_size]; the
 * label is stored at feature index REVIEW_DIM.
 *
 * Bug fix: step_size was declared `int` although cudaClassify passes a
 * `float` learning rate — any rate below 1.0 was silently truncated to 0,
 * freezing the weights. It is now `float` (callers are unaffected).
 */
__global__
void trainLogRegKernel(float *data, int batch_size, float step_size,
float *weights, float *errors,
float *num, float *grad) {
/*
* Reminder to self: accessing data element X for thread Y is
* = data[Y + (X * batch_size)]
*/
// Grid-stride loop: one thread per review in the batch.
unsigned int review = blockIdx.x * blockDim.x + threadIdx.x;
// For each review
while (review < batch_size){
/* Applying the gradient descent */
// These float initializations are for calculating the denominator
float exponent = 0;
float denom = 0;
// Initialize int to keep track of errors
int errorcount = 0;
// Calculating the numerator and denominator
for (int i = 0; i < REVIEW_DIM; i++){
// Calculating the numerator
num[review + (i * batch_size)] = data[review + (i * batch_size)] *
data[review + (REVIEW_DIM * batch_size)];
// Calculating the denominator
exponent += weights[i] * data[review + (i * batch_size)];
}
denom = (1 + exp(data[review + (REVIEW_DIM * batch_size)] *
exponent)) * -batch_size;
// Calculating the gradient
for (int i = 0; i < REVIEW_DIM; i++){
atomicAdd(&grad[review + (i * batch_size)],
num[review + (i * batch_size)] / denom);
}
/* Calculating prediction: a misclassification is a sign mismatch
between the linear score and the label. */
if ((exponent > 0 && data[review + (REVIEW_DIM * batch_size)] < 0)
|| (exponent < 0 && data[review + (REVIEW_DIM * batch_size)] > 0)){
errorcount++;
}
// Sync threads before updating weight
// NOTE(review): this barrier sits inside a loop whose trip count can
// differ between threads of a block when batch_size is not a multiple
// of the launch width; a divergent __syncthreads() is undefined
// behavior — confirm batch_size is always a multiple of the block size.
__syncthreads();
/* Updating the weight */
for (int i = 0; i < REVIEW_DIM; i++){
atomicAdd(&weights[i], -step_size *
grad[review + (i * batch_size)]);
}
// Fold this thread's misclassification rate into the batch error.
atomicAdd(errors, (float) errorcount / batch_size);
review += blockDim.x * gridDim.x;
}
}
/*
* All parameters have the same meaning as in docstring for trainLogRegKernel.
* Notably, cudaClassify returns a float that quantifies the error in the
* minibatch. This error should go down as more training occurs.
*/
/*
 * Runs one training round of trainLogRegKernel over a mini-batch and returns
 * the error value the kernel accumulated (misclassification rate summed over
 * the batch). `grad` and the device error cell are zeroed before the launch;
 * all other parameters are forwarded to the kernel unchanged.
 */
float cudaClassify(float *data, int batch_size,
                   float step_size, float *weights,
                   float *num, float *grad) {
    // Cap the block at 512 threads; smaller batches get exactly one block's
    // worth of threads.
    int threadsPerBlock = 512;
    if (batch_size < 512) {
        threadsPerBlock = batch_size;
    }
    // Ceiling division so every review is covered by some thread.
    int numBlocks = (batch_size + threadsPerBlock - 1) / threadsPerBlock;
    int sharedBytes = 0;
    // Single-float device accumulator for the batch error, zeroed up front.
    float *devErrors;
    cudaMalloc(&devErrors, sizeof(float));
    cudaMemset(devErrors, 0, sizeof(float));
    // Clear the gradient scratch space (features + label column per review).
    cudaMemset(grad, 0, (REVIEW_DIM + 1) * batch_size * sizeof(float));
    trainLogRegKernel<<<numBlocks, threadsPerBlock, sharedBytes>>>(data,
                                                                   batch_size,
                                                                   step_size,
                                                                   weights,
                                                                   devErrors,
                                                                   num,
                                                                   grad);
    // Blocking copy also synchronizes with the kernel before we read.
    float hostErrors = -1.0;
    cudaMemcpy(&hostErrors, devErrors, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(devErrors);
    return hostErrors;
}
|
5b006e5a252c8944d9fd933b29bc40a2cfc1ec41.hip | // !!! This is a file automatically generated by hipify!!!
#include "operators.h"
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/set_operations.h>
#include "moderngpu/src/moderngpu/kernel_join.hxx"
// Predicate functor: true iff the integer argument is even. Usable from both
// host and device code (e.g. as a thrust predicate).
struct is_even
{
    __host__ __device__
    bool operator()(const int &x)
    {
        // Low bit clear <=> even; identical to (x % 2) == 0 on two's
        // complement, including for negative inputs.
        return (x & 1) == 0;
    }
};
using namespace mgpu;
using namespace std;
using namespace thrust::placeholders;
// Byte sizes of the engine's numeric column types.
size_t int_size = sizeof(int_type);
size_t float_size = sizeof(float_type);
// Column metadata accumulated by the emit_var* parser callbacks.
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<unsigned int> j_col_count;
// Statement-level counters populated while parsing.
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
unsigned int distinct_cnt = 0;
unsigned int join_col_cnt = 0;
unsigned int join_tab_cnt = 0;
unsigned int tab_cnt = 0;
// Per-join bookkeeping: joined table names, join kind, and whether each join
// predicate is an equality ('E') or inequality ('N').
queue<string> op_join;
queue<char> join_type;
queue<char> join_eq_type;
unsigned int partition_count;
// Miscellaneous global engine state shared across operators.
map<string,unsigned int> stat;
map<unsigned int, unsigned int> join_and_cnt;
map<string, map<string, bool> > used_vars;
bool save_dict = 0;
// Device scratch buffer reused by order_inplace (holds the permutation).
thrust::device_vector<unsigned char> scratch;
map<string, string> filter_var;
thrust::device_vector<int> ranj;
unsigned long long int currtime;
standard_context_t context;
// For every table in the global data dictionary, records in used_vars which
// of the variables currently queued in op_value belong to that table's
// column set. op_value itself is left untouched (a copy is drained instead).
void check_used_vars()
{
    for (auto & entry : data_dict) {
        const auto & columns = entry.second;
        auto pending(op_value);
        while (!pending.empty()) {
            const auto & name = pending.front();
            if (columns.count(name) != 0) {
                used_vars[entry.first][name] = 1;
            }
            pending.pop();
        }
    }
}
// Parser callback: records a bare identifier (column/variable name) on the
// operation queues.
void emit_name(const char *name)
{
op_type.push("NAME");
op_value.push(name);
}
// Parser callback: records the LIMIT value of the current statement.
void emit_limit(const int val)
{
op_nums.push(val);
}
// Parser callback: records a string literal, stripping surrounding double
// quotes if present.
void emit_string(const char *str)
{ // strip the surrounding double quotes, if present
if(str[0] == '"') {
string sss(str,1, strlen(str)-2);
op_value.push(sss);
}
else {
string sss(str);
op_value.push(sss);
};
op_type.push("STRING");
}
// Parser callback: records a string literal together with its GROUP BY
// companion value (stored in the global grp_val).
void emit_string_grp(const char *str, const char *str_grp)
{
emit_string(str);
grp_val = str_grp;
};
// Parser callback: records a qualified "table.column" field reference.
void emit_fieldname(const char* name1, const char* name2)
{
string s1(name1);
string s2(name2);
op_type.push("FIELD");
op_value.push(s1 + "." + s2);
};
// Parser callback: records an integer literal (precision 0, i.e. no
// fractional digits).
void emit_number(const int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
op_nums_precision.push(0);
}
// Parser callback: records a floating-point literal.
void emit_float(const float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
// Parser callback: records a decimal literal as a scaled integer. The
// decimal point is removed and its position is remembered as the value's
// precision (number of fractional digits), e.g. "12.345" -> value 12345,
// precision 3.
void emit_decimal(const char* str)
{
op_type.push("NUMBER");
string s1(str);
unsigned int precision;
auto pos = s1.find(".");
if(pos == std::string::npos)
precision = 0;
else {
precision = (s1.length() - pos) -1;
s1.erase(pos,1);
};
// Bug fix: stoi parses into a 32-bit int and throws std::out_of_range for
// anything larger, although op_nums holds int_type values; decimals with
// the point removed easily exceed 32 bits, so parse with stoll instead.
op_nums.push(stoll(s1));
op_nums_precision.push(precision);
}
// Parser callbacks for arithmetic operators: each pushes its opcode.
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
// Count of pending AND-ed join conditions still to be attributed to the
// current join (consumed by emit_eq/emit_neq below).
unsigned int misses = 0;
// Parser callback: logical AND inside a join condition; counts the extra
// column predicate for the current join.
void emit_and()
{
op_type.push("AND");
join_col_cnt++;
}
// Parser callback: equality join predicate. The first predicate of a join
// closes out the AND-count bookkeeping for this table; subsequent ones just
// decrement the pending count.
void emit_eq()
{
op_type.push("JOIN");
join_eq_type.push('E');
if(misses == 0) {
join_and_cnt[tab_cnt] = join_col_cnt;
misses = join_col_cnt;
join_col_cnt = 0;
tab_cnt++;
}
else {
misses--;
}
}
// Parser callback: inequality join predicate. Identical bookkeeping to
// emit_eq, but tags the predicate 'N' instead of 'E'.
// NOTE(review): the body duplicates emit_eq except for the pushed tag —
// candidate for a shared helper.
void emit_neq()
{
op_type.push("JOIN");
join_eq_type.push('N');
if(misses == 0) {
join_and_cnt[tab_cnt] = join_col_cnt;
misses = join_col_cnt;
join_col_cnt = 0;
tab_cnt++;
}
else {
misses--;
}
}
// Parser callback: DISTINCT modifier.
void emit_distinct()
{
op_type.push("DISTINCT");
distinct_cnt++;
}
// Parser callbacks for date-part extraction functions.
void emit_year()
{
op_type.push("YEAR");
}
void emit_month()
{
op_type.push("MONTH");
}
void emit_day()
{
op_type.push("DAY");
}
// Parser callbacks for logical/arithmetic operators.
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
// Parser callback: comparison operator; val encodes which comparison.
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
// Generic emit hook required by the parser interface; intentionally a no-op.
void emit(const char *s, ...)
{
}
// Parser callback: fixed-size column declaration (name, ordinal, type).
// The ref/ref_name arguments are currently unused.
void emit_var(const char *s, const int c, const char *f, const char* ref, const char* ref_name)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
// Parser callbacks: ORDER BY direction markers for a sort column.
void emit_var_asc(const char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(const char *s)
{
op_type.push(s);
op_value.push("DESC");
}
// Parser callback: SORT clause with its partition count.
void emit_sort(const char *s, const int p)
{
op_sort.push(s);
partition_count = p;
}
// Parser callback: PRESORT clause.
void emit_presort(const char *s)
{
op_presort.push(s);
}
// Parser callback: VARCHAR column declaration; d is the declared width.
void emit_varchar(const char *s, const int c, const char *f, const int d, const char *ref, const char* ref_name)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
// Parser callback: DECIMAL column declaration; only the precision is kept
// as the size (scale is currently unused).
void emit_vardecimal(const char *s, const int c, const char *f, const int scale, const int precision)
{
namevars.push(s);
typevars.push(f);
sizevars.push(precision);
cols.push(c);
}
// Parser callback: a named select-list expression; bumps the select count.
void emit_sel_name(const char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
// Parser callbacks for aggregate functions.
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
// Parser callback: registers a table participating in a join, with its join
// kind tag tp.
void emit_join_tab(const char *s, const char tp)
{
op_join.push(s);
join_tab_cnt++;
join_type.push(tp);
};
// Sorts the host-resident columns of CudaSet `a` in place, ascending, by the
// key columns in exe_type (a stable multi-key sort built by iterating
// update_permutation_host per key), then applies the resulting permutation
// to every column listed in field_names.
// NOTE(review): update_str is unused; the raw new[]/delete[] pair is not
// exception-safe — consider std::vector. `temp` is sized by the widest
// column (max_char) and reused as scratch for every column type.
void order_inplace_host(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str)
{
unsigned int* permutation = new unsigned int[a->mRecCount];
thrust::sequence(permutation, permutation + a->mRecCount);
char* temp = new char[a->mRecCount*max_char(a)];
// Build a parallel stack of sort directions (always "ASC").
stack<string> exe_type1(exe_type), exe_value;
while(!exe_type1.empty()) {
exe_value.push("ASC");
exe_type1.pop();
};
// sort on host
for(;!exe_type.empty(); exe_type.pop(),exe_value.pop()) {
// type[] != 1 means an integer-typed column, else float.
if (a->type[exe_type.top()] != 1)
update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
else
update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
};
// Gather each payload column through the permutation (via temp) and copy
// the reordered data back over the original column.
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
if (a->type[*it] != 1) {
thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_int[*it].data(), (int_type*)temp);
thrust::copy((int_type*)temp, (int_type*)temp + a->mRecCount, a->h_columns_int[*it].data());
}
else {
thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_float[*it].data(), (float_type*)temp);
thrust::copy((float_type*)temp, (float_type*)temp + a->mRecCount, a->h_columns_float[*it].data());
}
};
delete [] temp;
delete [] permutation;
}
// GPU counterpart of order_inplace_host: sorts the device-resident columns
// of `a` in place, ascending, by the key columns in exe_type, then permutes
// every column in field_names. The permutation lives in the global
// `scratch` device buffer; a->grp is (re)used as per-column scratch.
// NOTE(review): update_str is unused; `bits` comes from the global cpy_bits
// map — presumably the bit width of compressed columns — confirm.
void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str)
{
// Ensure the scratch buffer can hold one 32-bit permutation index per row.
if(scratch.size() < a->mRecCount*4)
scratch.resize(a->mRecCount*4);
thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::sequence(permutation, permutation+a->mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
// Ensure grp can hold one 64-bit scratch value per row.
if(a->grp.size() < a->mRecCount*8)
a->grp.resize(a->mRecCount*8);
unsigned int bits;
// Refine the permutation once per sort key (stable multi-key sort).
for(; !exe_type.empty(); exe_type.pop()) {
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[exe_type.top()];
// type[] != 1 means an integer-typed column, else float.
if (a->type[exe_type.top()] != 1) {
update_permutation(a->d_columns_int[exe_type.top()], raw_ptr, a->mRecCount, "ASC", (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
}
else
update_permutation(a->d_columns_float[exe_type.top()], raw_ptr, a->mRecCount,"ASC", (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
};
// Apply the final permutation to every payload column.
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[*it];
if (a->type[*it] != 1) {
apply_permutation(a->d_columns_int[*it], raw_ptr, a->mRecCount, (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
}
else {
apply_permutation(a->d_columns_float[*it], raw_ptr, a->mRecCount, (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
};
};
}
// Decides whether the current multi-table join rooted at fact table j1 can
// be executed as a star join: there must be at least one joined dimension
// table, every non-select operand queued in op_value must be a column of j1,
// and for a single-dimension join the required bitmap file must exist.
// Returns 1 when the star-join path is usable, 0 otherwise.
bool check_star_join(const string j1)
{
    // Work on a copy of the operand queue; skip the select-list entries
    // (two queue slots per selected expression).
    auto remaining(op_value);
    for (auto i = 0; i < sel_count; i++) {
        remaining.pop();
        remaining.pop();
    }
    if (join_tab_cnt == 0) {
        return 0;
    }
    auto & factColumns = varNames[j1]->columnNames;
    // Every remaining operand pair must reference a fact-table column.
    while (remaining.size()) {
        if (std::find(factColumns.begin(), factColumns.end(), remaining.front()) == factColumns.end()) {
            return 0;
        }
        remaining.pop();
        remaining.pop();
    }
    // A single-dimension star join additionally needs its bitmap index.
    if (join_tab_cnt == 1 &&
        !check_bitmap_file_exist(varNames[j1], varNames[op_join.front()])) {
        return 0;
    }
    return 1;
}
// Execute a star join: fact table j1 is joined against the join_tab_cnt
// dimension tables queued in op_join, using pre-built bitmap indexes, and
// the result is published under name 's' in varNames. Consumes the global
// parser state (op_value, op_join, sel_count, join_tab_cnt, join_col_cnt).
// The fact table is streamed one segment at a time; dimension columns are
// preloaded onto the GPU once and reused across segments.
void star_join(const char *s, const string j1)
{
map<string,bool> already_copied;
queue<string> op_left;
CudaSet* left = varNames.find(j1)->second;
queue<string> op_sel;
queue<string> op_sel_as;
// split the first sel_count (column, alias) pairs off op_value; remember
// which selected columns live in the fact table
for(auto i=0; i < sel_count; i++) {
if(std::find(left->columnNames.begin(), left->columnNames.end(), op_value.front()) != left->columnNames.end())
op_left.push(op_value.front());
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
auto op_sel_s(op_sel), op_sel_s_as(op_sel_as), op_g(op_value);
// result set with the selected columns/aliases
CudaSet* c = new CudaSet(op_sel_s, op_sel_s_as);
string f1, f2;
map<string, string> key_map;  // dimension table -> fact-side join column
map<string, char> sort_map;   // dimension table -> index sortedness flag from loadIndex
map<string, string> r_map;    // fact-side join column -> dimension-side key column
// per-dimension setup: record join column pairs and preload the used
// dimension columns into GPU memory
for(auto i = 0; i < join_tab_cnt; i++) {
f1 = op_g.front();
op_g.pop();
f2 = op_g.front();
op_g.pop();
r_map[f1] = f2;
// op_join is consumed from the back: skip to the table for this i
queue<string> op_jj(op_join);
for(auto z = 0; z < (join_tab_cnt-1) - i; z++)
op_jj.pop();
size_t rcount;
queue<string> op_vd(op_g), op_alt(op_sel);
unsigned int jc = join_col_cnt;
// append the extra join columns to the candidate selection list
while(jc) {
jc--;
op_vd.pop();
op_alt.push(op_vd.front());
op_vd.pop();
};
key_map[op_jj.front()] = f1;
CudaSet* right = varNames.find(op_jj.front())->second;
if(!check_bitmaps_exist(left, right)) {
cout << "Required bitmap on table " << op_jj.front() << " doesn't exists" << endl;
exit(0);
};
// collect the dimension-table columns (other than the key f2) that the
// query actually selects
queue<string> second;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front()) != 0 && std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
second.push(op_alt.front());
//cout << "col " << op_alt.front() << " " << op_jj.front() << endl;
op_left.push(f1);
};
op_alt.pop();
};
if(!second.empty()) {
right->filtered = 0;
right->mRecCount = right->maxRecs;
load_queue(second, right, "", rcount, 0, right->segCount, 0,0); // put all used columns into GPU
};
};
queue<string> idx;
set<string> already_loaded;
bool right_cpy = 0;
// main loop: stream the fact table one segment at a time
for (unsigned int i = 0; i < left->segCount; i++) {
std::clock_t start2 = std::clock();
if(verbose)
cout << "segment " << i << " " << getFreeMem() << endl;
idx = left->fil_value;
already_loaded.clear();
// load every join index referenced by the filter; such references are
// qualified "table.column" names
while(!idx.empty()) {
//load the index
if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) {
//extract table name and colname from index name
already_loaded.insert(idx.front());
size_t pos1 = idx.front().find_first_of(".", 0);
size_t pos2 = idx.front().find_first_of(".", pos1+1);
CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second;
char a;
//cout << "loading index " << idx.front() << endl;
a = left->loadIndex(idx.front(), i);
sort_map[idx.front().substr(pos1+1, pos2-pos1-1)] = a;
};
idx.pop();
};
left->filtered = 0;
size_t cnt_c = 0;
allocColumns(left, left->fil_value);
copyColumns(left, left->fil_value, i, cnt_c);
// apply the WHERE filter to this segment; res is a device-side bool mask
bool* res = filter(left->fil_type, left->fil_value, left->fil_nums, left->fil_nums_f, left->fil_nums_precision, left, i);
thrust::device_ptr<bool> star((bool*)res);
size_t cnt = thrust::count(star, star + (unsigned int)left->mRecCount, 1);
//cout << "join res " << cnt << " out of " << left->mRecCount << endl;
thrust::host_vector<unsigned int> prm_vh(cnt);
thrust::device_vector<unsigned int> prm_v(cnt);
thrust::host_vector<unsigned int> prm_tmp(cnt);
thrust::device_vector<unsigned int> prm_tmp_d(cnt);
//std::cout<< "seg filter " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
if(cnt) { //gather
//start1 = std::clock();
// build the list of surviving row indices (prm_d / prm_vh)
left->prm_d.resize(cnt);
// NOTE(review): the counting range ends at mRecCount-1, so the last row
// of the segment is never tested even though 'cnt' above counted the
// full mask — looks like an off-by-one; confirm.
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)left->mRecCount-1),
star, left->prm_d.begin(), thrust::identity<bool>());
// NOTE(review): 'star' is only freed on this cnt!=0 path; when the
// filter matches nothing the buffer from filter() appears to leak.
thrust::device_free(star);
prm_vh = left->prm_d;
size_t offset = c->mRecCount;
c->resize_join(cnt);
queue<string> op_sel1(op_sel_s);
void* temp;
// device scratch: one max-width slot per surviving row
CUDA_SAFE_CALL(hipMalloc((void **) &temp, cnt*max_char(c)));
hipMemset(temp,0,cnt*max_char(c));
CudaSet *t;
unsigned int cnt1, bits;
int_type lower_val;
thrust::device_vector<unsigned int> output(cnt);
//std::cout<< "seg start " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
// gather every selected column for the surviving rows
while(!op_sel1.empty()) {
if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end()) {
// ---- column lives in the fact table: gather on the host from the
// compressed segment buffer ----
if(left->filtered)
t = varNames[left->source_name];
else
t = left;
if(left->type[op_sel1.front()] <= 1) {
if(ssd && !interactive) {
//start1 = std::clock();
lower_val = t->readSsdSegmentsFromFile(i, op_sel1.front(), offset, prm_vh, c);
//std::cout<< "SSD L SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
}
else {
t->readSegmentsFromFile(i, op_sel1.front());
void* h;
if(!interactive) {
if(left->type[op_sel1.front()] == 0)
h = t->h_columns_int[op_sel1.front()].data();
else
h = t->h_columns_float[op_sel1.front()].data();
}
else {
string ff = t->load_file_name + "." + op_sel1.front()+ "." + to_string(i);
h = buffers[ff];
};
// segment layout (as read here): [0] payload byte count,
// then the minimum value (FOR base), packed values at +6,
// and the bit-width stored after the payload
cnt1 = ((unsigned int*)h)[0];//bytes
lower_val = ((int_type*)(((unsigned int*)h)+1))[0];
bits = ((unsigned int*)((char*)h + cnt1))[8];
//cout << cnt1 << " " << lower_val << " " << bits << " " << left->type[op_sel1.front()] << endl;
// gather at the stored bit-width (8/16/32/64) into the result
if(bits == 8) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), ptr + offset);
};
}
else if(bits == 16) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), ptr + offset);
};
}
else if(bits == 32) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), ptr + offset);
}
}
else if(bits == 64) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), ptr + offset);
};
};
};
// undo frame-of-reference compression: add the minimum back
if(left->type[op_sel1.front()] != 1)
thrust::transform( c->h_columns_int[op_sel1.front()].begin() + offset, c->h_columns_int[op_sel1.front()].begin() + offset + cnt,
thrust::make_constant_iterator(lower_val), c->h_columns_int[op_sel1.front()].begin() + offset, thrust::plus<int_type>());
else {
// decimals are stored as scaled integers; add base then convert
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::transform(ptr + offset, ptr + offset + cnt,
thrust::make_constant_iterator(lower_val), ptr + offset, thrust::plus<int_type>());
thrust::transform(ptr + offset, ptr + offset + cnt, c->h_columns_float[op_sel1.front()].begin() + offset, long_to_float());
};
}
else { //gather string. There are no strings in fact tables.
};
}
else {
// ---- column lives in a dimension table: translate fact rows to
// dimension rows via the join key, then gather on the device ----
for(auto it = key_map.begin(); it != key_map.end(); it++) {
CudaSet* r = varNames.find(it->first)->second;
if(std::find(r->columnNames.begin(), r->columnNames.end(), op_sel1.front()) != r->columnNames.end()) {
if(i == 0) {
if(data_dict[varNames[it->first]->load_file_name][op_sel1.front()].col_type == 2) {
//cout << "SET " << op_sel1.front() << " to " << varNames[it->first]->load_file_name + "." + op_sel1.front() << endl;
c->string_map[op_sel1.front()] = varNames[it->first]->load_file_name + "." + op_sel1.front();
};
}
if(left->filtered)
t = varNames[left->source_name];
else
t = left;
// read the fact-side key column and gather the surviving keys
if(ssd && !interactive) {
//start1 = std::clock();
lower_val = t->readSsdSegmentsFromFileR(i, key_map[it->first], prm_vh, prm_tmp);
//std::cout<< "SSD R SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
}
else {
t->readSegmentsFromFile(i, key_map[it->first]);
void* h;
if(!interactive) {
h = t->h_columns_int[key_map[it->first]].data();
}
else {
string ff = t->load_file_name + "." + key_map[it->first] + "." + to_string(i);
h = buffers[ff];
};
// same segment layout as the fact-column path above
cnt1 = ((unsigned int*)h)[0];
lower_val = ((int_type*)(((unsigned int*)h)+1))[0];
bits = ((unsigned int*)((char*)h + cnt1))[8];
//cout << cnt1 << " " << lower_val << " " << bits << endl;
if(bits == 8) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), prm_tmp.begin());
}
else if(bits == 16) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), prm_tmp.begin());
}
else if(bits == 32) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), prm_tmp.begin());
}
else if(bits == 64) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), prm_tmp.begin());
};
};
// rebase the gathered keys; keys are treated as 1-based below
if(lower_val != 1)
thrust::transform(prm_tmp.begin(), prm_tmp.end(), thrust::make_constant_iterator(lower_val-1), prm_tmp.begin(), thrust::plus<unsigned int>());
if(sort_map[r->source_name] == '1') { // sorted consecutive starting with 1 dimension keys
// key value == row position: use keys directly as row indices
prm_tmp_d = prm_tmp;
//cout << "PATH 1 " << endl;
}
else {
// general case: binary-search each key in the (sorted)
// dimension key column to find its row position
//cout << "PATH 2 " << r->source_name << endl;
output = prm_tmp;
if(r->d_columns_int[r_map[key_map[it->first]]].size() == 0) {
r->d_columns_int[r_map[key_map[it->first]]].resize(r->maxRecs);
};
if(right_cpy == 0) {
r->CopyColumnToGpu(r_map[key_map[it->first]]);
};
thrust::lower_bound(r->d_columns_int[r_map[key_map[it->first]]].begin(), r->d_columns_int[r_map[key_map[it->first]]].end(),
output.begin(), output.end(),
prm_tmp_d.begin());
};
// gather the dimension column at the resolved row positions
if(r->type[op_sel1.front()] != 1) {
thrust::device_ptr<int_type> d_tmp((int_type*)temp);
thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_tmp((float_type*)temp);
thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_float[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_float[op_sel1.front()].begin() + offset);
};
break;
};
};
};
op_sel1.pop();
//std::cout<< ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
};
hipFree(temp);
right_cpy = 1; // dimension key columns are now resident on the GPU
};
//std::cout<< "SEG " << i << " " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
//unload the segment indexes :
idx = left->fil_value;
already_loaded.clear();
while(!idx.empty()) {
if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) {
//extract table name and colname from index name
already_loaded.insert(idx.front());
size_t pos1 = idx.front().find_first_of(".", 0);
size_t pos2 = idx.front().find_first_of(".", pos1+1);
CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second;
string f1 = idx.front() + "." + to_string(i);
auto it = index_buffers.find(f1);
if(it != index_buffers.end()) {
// release the pinned host buffer holding this segment's index
hipHostFree(index_buffers[f1]);
index_buffers.erase(it);
};
};
idx.pop();
};
};
//if(verbose)
// std::cout<< "star join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
// release device memory of all joined dimension tables
while(!op_join.empty()) {
varNames[op_join.front()]->deAllocOnDevice();
op_join.pop();
};
varNames[s] = c;
c->maxRecs = c->mRecCount;
if(verbose)
cout << endl << "join count " << c->mRecCount << endl;
};
// Top-level JOIN dispatcher. In the scan pass (scan_state == 0) it only
// validates variable names and records statement usage in 'stat'. In the
// execution pass it either runs a star join (all dimension joins at once)
// or chains emit_multijoin calls, then renames selected columns to their
// aliases and frees operands whose last use was this statement.
// NOTE(review): the 'grp' parameter is not referenced in this body.
void emit_join(const char *s, const char *j1, const int grp, const int start_seg, const int end_seg)
{
//cout << "emit_join " << s << " " << join_tab_cnt << " " << op_join.front() << endl;
statement_count++;
if (scan_state == 0) {
// scan pass: verify operands exist and record last-use statement ids
if (stat.find(j1) == stat.end() && data_dict.count(j1) == 0) {
process_error(2, "Join : couldn't find variable " + string(j1) );
};
if (stat.find(op_join.front()) == stat.end() && data_dict.count(op_join.front()) == 0) {
process_error(2, "Join : couldn't find variable " + op_join.front() );
};
stat[s] = statement_count;
stat[j1] = statement_count;
if(filter_var.find(j1) != filter_var.end()) {
stat[filter_var[j1]] = statement_count;
};
check_used_vars();
while(!op_join.empty()) {
stat[op_join.front()] = statement_count;
if(filter_var.find(op_join.front()) != filter_var.end()) {
stat[filter_var[op_join.front()]] = statement_count;
};
op_join.pop();
};
return;
};
// keep a copy of op_value: emit_multijoin consumes it and we need the
// select list again afterwards for alias renaming
queue<string> op_m(op_value);
if(check_star_join(j1)) {
if(verbose)
cout << "executing star join !! " << endl;
star_join(s, j1);
}
else {
if(join_tab_cnt > 1) {
// multi-table join: chain pairwise joins through temp tables s1, s2, ...
// the final iteration writes directly to 's'
string tab_name;
for(unsigned int i = 1; i <= join_tab_cnt; i++) {
if(i == join_tab_cnt)
tab_name = s;
else
tab_name = s + to_string(i);
string j, j2;
if(i == 1) {
j2 = op_join.front();
op_join.pop();
j = op_join.front();
op_join.pop();
}
else {
if(!op_join.empty()) {
j = op_join.front();
op_join.pop();
}
else
j = j1;
j2 = s + to_string(i-1); // previous intermediate result
};
emit_multijoin(tab_name, j, j2, i, s, start_seg, end_seg);
op_value = op_m; // emit_multijoin consumed op_value; restore it
};
}
else {
// simple two-table join
emit_multijoin(s, j1, op_join.front(), 1, s, start_seg, end_seg);
op_join.pop();
};
};
// split the select list back into (column, alias) pairs
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_m.front());
op_m.pop();
op_sel_as.push(op_m.front());
op_m.pop();
};
// rename each selected column to its alias: move the column data and all
// associated metadata (type, decimal, char_size) under the alias key
while(!op_sel_as.empty()) {
//cout << "alias " << op_sel.front() << " : " << op_sel_as.front() << endl;
if(op_sel.front() != op_sel_as.front()) {
if(varNames[s]->type[op_sel.front()] == 0) {
// integer column
varNames[s]->h_columns_int[op_sel_as.front()] = varNames[s]->h_columns_int[op_sel.front()];
varNames[s]->h_columns_int.erase(op_sel.front());
varNames[s]->d_columns_int[op_sel_as.front()] = varNames[s]->d_columns_int[op_sel.front()];
varNames[s]->d_columns_int.erase(op_sel.front());
varNames[s]->type[op_sel_as.front()] = 0;
varNames[s]->type.erase(op_sel.front());
}
else if(varNames[s]->type[op_sel.front()] == 1) {
// float/decimal column
varNames[s]->h_columns_float[op_sel_as.front()] = varNames[s]->h_columns_float[op_sel.front()];
varNames[s]->h_columns_float.erase(op_sel.front());
varNames[s]->d_columns_float[op_sel_as.front()] = varNames[s]->d_columns_float[op_sel.front()];
varNames[s]->d_columns_float.erase(op_sel.front());
varNames[s]->type[op_sel_as.front()] = 1;
varNames[s]->type.erase(op_sel.front());
varNames[s]->decimal.erase(op_sel.front());
}
else {
// string column
varNames[s]->h_columns_char[op_sel_as.front()] = varNames[s]->h_columns_char[op_sel.front()];
varNames[s]->h_columns_char.erase(op_sel.front());
varNames[s]->d_columns_char[op_sel_as.front()] = varNames[s]->d_columns_char[op_sel.front()];
varNames[s]->d_columns_char.erase(op_sel.front());
varNames[s]->type[op_sel_as.front()] = 2;
varNames[s]->type.erase(op_sel.front());
varNames[s]->char_size[op_sel_as.front()] = varNames[s]->char_size[op_sel.front()];
varNames[s]->char_size.erase(op_sel.front());
};
varNames[s]->decimal[op_sel_as.front()] = varNames[s]->decimal[op_sel.front()];
// rename in the ordered column list as well
auto it = std::find(varNames[s]->columnNames.begin(), varNames[s]->columnNames.end(), op_sel.front());
*it = op_sel_as.front();
};
op_sel_as.pop();
op_sel.pop();
};
clean_queues();
// free operands whose recorded last use is this statement
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(op_join.size()) {
if(stat[op_join.front()] == statement_count && op_join.front().compare(j1) != 0) {
varNames[op_join.front()]->free();
varNames.erase(op_join.front());
};
};
}
// Host-side gather: dest[i] = h[h_tmp[i]] for every index in h_tmp.
// T is the source element type, P the destination element type; the
// assignment performs the usual implicit conversion between them.
// Caller must ensure dest has room for h_tmp.size() elements and every
// index in h_tmp is valid for h.
template<typename T, typename P>
void p_gather(thrust::host_vector<int>& h_tmp, T* h, P* dest)
{
    // size_t matches host_vector::size(): avoids the signed/unsigned
    // comparison and int overflow of the original 'int i' counter when the
    // index vector exceeds 2^31 elements
    for(size_t i = 0; i < h_tmp.size(); i++) {
        dest[i] = h[h_tmp[i]];
    };
};
void emit_multijoin(const string s, const string j1, const string j2, const unsigned int tab, const char* res_name, const int start_segment, const int end_segment)
{
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
if(varNames.find(j1) == varNames.end())
cout << "Couldn't find j1 " << j1 << endl;
if(varNames.find(j2) == varNames.end())
cout << "Couldn't find j2 " << j2 << " here " << endl;
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
queue<string> op_sel_s(op_sel);
queue<string> op_sel_s_as(op_sel_as);
queue<string> op_g(op_value);
if(tab > 0) {
for(unsigned int z = 0; z < join_tab_cnt - tab; z++) {
for(unsigned int j = 0; j < join_and_cnt[z]*2 + 2; j++) {
op_sel_s.push(op_g.front());
op_sel_s_as.push(op_g.front());
op_g.pop();
};
};
};
string f1 = op_g.front();
op_g.pop();
string f2 = op_g.front();
op_g.pop();
if (verbose)
cout << "JOIN " << s << " " << f1 << " " << f2 << " " << getFreeMem() << " " << phase_copy << endl;
std::clock_t start1 = std::clock();
CudaSet* c = new CudaSet(right, left, op_sel_s, op_sel_s_as);
if ((left->mRecCount == 0 && !left->filtered) || (right->mRecCount == 0 && !right->filtered)) {
c = new CudaSet(left, right, op_sel_s, op_sel_s_as);
varNames[res_name] = c;
clean_queues();
return;
};
if(join_tab_cnt > 1 && tab < join_tab_cnt)
c->tmp_table = 1;
else
c->tmp_table = 0;
string colname1, colname2;
string tmpstr;
if (std::find(left->columnNames.begin(), left->columnNames.end(), f1) != left->columnNames.end()) {
colname1 = f1;
if (std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
colname2 = f2;
}
else {
process_error(2, "Couldn't find column " + f2 );
};
}
else if (std::find(right->columnNames.begin(), right->columnNames.end(), f1) != right->columnNames.end()) {
colname2 = f1;
tmpstr = f1;
f1 = f2;
if (std::find(left->columnNames.begin(), left->columnNames.end(), f2) != left->columnNames.end()) {
colname1 = f2;
f2 = tmpstr;
}
else {
process_error(2, "Couldn't find column " +f2 );
};
}
else {
process_error(2, "Couldn't find column " + f1);
};
if (!((left->type[colname1] == 0 && right->type[colname2] == 0) || (left->type[colname1] == 2 && right->type[colname2] == 2)
|| (left->type[colname1] == 1 && right->type[colname2] == 1 && left->decimal[colname1] && right->decimal[colname2]))) {
process_error(2, "Joins on floats are not supported ");
};
//bool decimal_join = 0;
//if (left->type[colname1] == 1 && right->type[colname2] == 1)
// decimal_join = 1;
queue<string> op_vd(op_g);
queue<string> op_g1(op_g);
queue<string> op_alt(op_sel);
unsigned int jc = join_and_cnt[join_tab_cnt - tab];
while(jc) {
jc--;
op_vd.pop();
op_alt.push(op_vd.front());
op_vd.pop();
};
size_t rcount = 0, cnt_r;
queue<string> cc;
if (left->type[colname1] == 2) {
left->d_columns_int[colname1] = thrust::device_vector<int_type>();
}
else {
cc.push(f1);
allocColumns(left, cc);
};
left->hostRecCount = left->mRecCount;
size_t cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
queue<string> lc(cc);
thrust::device_vector<unsigned int> v_l(left->maxRecs);
//MGPU_MEM(int) aIndicesDevice, bIndicesDevice, intersectionDevice;
stack<string> exe_type;
set<string> field_names;
exe_type.push(f2);
for(unsigned int i = 0; i < right->columnNames.size(); i++) {
if (std::find(c->columnNames.begin(), c->columnNames.end(), right->columnNames[i]) != c->columnNames.end() || right->columnNames[i] == f2 || join_and_cnt[join_tab_cnt - tab]) {
field_names.insert(right->columnNames[i]);
};
};
thrust::device_vector<int> p_tmp;
unsigned int start_part = 0;
bool prejoin = 0;
while(start_part < right->segCount) {
right->deAllocOnDevice();
std::clock_t start12 = std::clock();
if(right->not_compressed || (!right->filtered && getFreeMem() < right->columnNames.size()*right->hostRecCount*8*2)) {
cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, start_part+1);
start_part = start_part+1;
}
else {
cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, right->segCount);
start_part = right->segCount;
for(unsigned int i=0; i < right->columnNames.size(); i++) {
if (right->type[right->columnNames[i]] != 1) {
right->d_columns_int[right->columnNames[i]].shrink_to_fit();
}
else
right->d_columns_float[right->columnNames[i]].shrink_to_fit();
};
};
right->mRecCount = cnt_r;
bool order = 1;
if(!right->presorted_fields.empty() && right->presorted_fields.front() == f2) {
order = 0;
//cout << "No need to sort " << endl;
if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size())
right->sort_check = '1';
else {
right->sort_check = '0';
};
};
if(order) {
if(thrust::is_sorted(right->d_columns_int[f2].begin(), right->d_columns_int[f2].end())) {
if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) {
right->sort_check = '1';
}
else {
right->sort_check = '0';
};
}
else {
//cout << "sorting " << endl;
size_t tot_size = right->mRecCount*8*right->columnNames.size();
if (getFreeMem() > tot_size*1.5) {
order_inplace(right, exe_type, field_names, 0);
}
else {
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
//cout << "sorting " << *it << endl;
if(right->type[*it] != 1) {
if(right->h_columns_int[*it].size() < right->mRecCount)
right->h_columns_int[*it].resize(right->mRecCount);
thrust::copy(right->d_columns_int[*it].begin(), right->d_columns_int[*it].begin() + right->mRecCount, right->h_columns_int[*it].begin());
}
else {
if(right->type[*it] == 1) {
if(right->h_columns_float[*it].size() < right->mRecCount)
right->h_columns_float[*it].resize(right->mRecCount);
};
thrust::copy(right->d_columns_float[*it].begin(), right->d_columns_float[*it].begin() + right->mRecCount, right->h_columns_float[*it].begin());
};
};
order_inplace_host(right, exe_type, field_names, 0);
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
if(right->type[*it] != 1)
thrust::copy(right->h_columns_int[*it].begin(), right->h_columns_int[*it].begin() + right->mRecCount, right->d_columns_int[*it].begin());
else
thrust::copy(right->h_columns_float[*it].begin(), right->h_columns_float[*it].begin() + right->mRecCount, right->d_columns_float[*it].begin());
};
};
};
};
//std::cout<< "join right load time " << ( ( std::clock() - start12 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
int e_segment;
if(end_segment == -1) {
e_segment = left->segCount;
}
else
e_segment = end_segment;
for (unsigned int i = start_segment; i < e_segment; i++) {
if(verbose)
//cout << "segment " << i << '\xd';
cout << "segment " << i << endl;
cnt_l = 0;
copyColumns(left, lc, i, cnt_l);
cnt_l = left->mRecCount;
auto join_eq_type1(join_eq_type);
if (cnt_l) {
// sort the left index column, save the permutation vector, it might be needed later
thrust::device_ptr<int_type> d_col((int_type*)thrust::raw_pointer_cast(left->d_columns_int[colname1].data()));
thrust::sequence(v_l.begin(), v_l.begin() + cnt_l,0,1);
bool do_sort = 1;
if(!left->sorted_fields.empty()) {
if(left->sorted_fields.front() == f1) {
do_sort = 0;
};
}
else if(!left->presorted_fields.empty()) {
if(left->presorted_fields.front() == f1) {
do_sort = 0;
};
};
if(do_sort) {
thrust::sort_by_key(d_col, d_col + cnt_l, v_l.begin());
}
else if(verbose)
cout << "No need of sorting " << endl;
if(prejoin) {
//res_count = SetOpKeys<MgpuSetOpIntersection, true>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
// thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
// &intersectionDevice, *context, false);
//if(!res_count)
// continue;
};
if (left->d_columns_int[colname1][0] > right->d_columns_int[colname2][cnt_r-1] ||
left->d_columns_int[colname1][cnt_l-1] < right->d_columns_int[colname2][0]) {
if(verbose)
cout << endl << "skipping after copying " << endl;
continue;
};
//else
// cout << "JOINING " << left->d_columns_int[colname1][0] << ":" << left->d_columns_int[colname1][cnt_l-1] << " AND " << right->d_columns_int[colname2][0] << ":" << right->d_columns_int[colname2][cnt_r-1] << endl;
//cout << "joining " << left->d_columns_int[colname1][0] << " : " << left->d_columns_int[colname1][cnt_l-1] << " and " << right->d_columns_int[colname2][0] << " : " << right->d_columns_int[colname2][cnt_r-1] << endl;
char join_kind = join_type.front();
std::clock_t start11 = std::clock();
mem_t<int2> res;
if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') {
//res_count = RelationalJoin<MgpuJoinKindInner>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
// thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
// &aIndicesDevice, &bIndicesDevice,
// mgpu::less<int_type>(), *context);
res = inner_join(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, less_t<int_type>(), context);
};
res_count = res.size();
/* else if(join_kind == 'L')
res_count = RelationalJoin<MgpuJoinKindLeft>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
else if(join_kind == 'R')
res_count = RelationalJoin<MgpuJoinKindRight>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
else if(join_kind == 'O')
res_count = RelationalJoin<MgpuJoinKindOuter>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
*/
if(verbose)
std::cout<< "join time " << ( ( std::clock() - start11 ) / (double)CLOCKS_PER_SEC ) << '\n';
if(verbose)
cout << "RES " << res_count << endl;
if(res_count == 0)
prejoin = 1;
thrust::device_ptr<int> d_res1 = thrust::device_malloc<int>(res_count);
thrust::device_ptr<int> d_res2 = thrust::device_malloc<int>(res_count);
thrust::counting_iterator<unsigned int> begin(0);
split_int2 ff(thrust::raw_pointer_cast(d_res1), thrust::raw_pointer_cast(d_res2), res.data());
thrust::for_each(begin, begin + res_count, ff);
if(res_count) {
p_tmp.resize(res_count);
thrust::sequence(p_tmp.begin(), p_tmp.end(),-1);
thrust::gather_if(d_res1, d_res1+res_count, d_res1, v_l.begin(), p_tmp.begin(), _1 >= 0);
};
// check if the join is a multicolumn join
unsigned int mul_cnt = join_and_cnt[join_tab_cnt - tab];
while(mul_cnt) {
mul_cnt--;
queue<string> mult(op_g);
string f3 = mult.front();
mult.pop();
string f4 = mult.front();
mult.pop();
//cout << "ADDITIONAL COL JOIN " << f3 << " " << f4 << " " << join_eq_type.front() << endl;
queue<string> rc;
rc.push(f3);
allocColumns(left, rc);
size_t offset = 0;
copyColumns(left, rc, i, offset, 0, 0);
rc.pop();
if (res_count) {
thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(res_count);
if(right->d_columns_int[f4].size() == 0)
load_queue(rc, right, f4, rcount, 0, right->segCount, 0, 0);
if (left->type[f3] == 1 && right->type[f4] == 1) {
thrust::transform(make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.begin()),
make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.end()),
make_permutation_iterator(right->d_columns_float[f4].begin(), d_res2),
d_add, float_equal_to());
}
else {
if(join_eq_type1.front() != 'N')
thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()),
make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()),
make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2),
d_add, thrust::equal_to<int_type>());
else {
thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()),
make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()),
make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2),
d_add, thrust::not_equal_to<int_type>());
};
};
if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') { // result count changes only in case of an inner join
unsigned int new_cnt = thrust::count(d_add, d_add+res_count, 1);
thrust::stable_partition(d_res2, d_res2 + res_count, d_add, thrust::identity<unsigned int>());
thrust::stable_partition(p_tmp.begin(), p_tmp.end(), d_add, thrust::identity<unsigned int>());
res_count = new_cnt;
}
else { //otherwise we consider it a valid left join result with non-nulls on the left side and nulls on the right side
thrust::transform(d_res2, d_res2 + res_count, d_add , d_res2, set_minus());
};
thrust::device_free(d_add);
};
if(!join_eq_type1.empty())
join_eq_type1.pop();
};
while(!join_eq_type1.empty())
join_eq_type1.pop();
//cout << "MUL res_count " << res_count << endl;
if(join_kind == '1') { //LEFT SEMI
thrust::sort(p_tmp.begin(), p_tmp.begin() + res_count);
auto new_end = thrust::unique(p_tmp.begin(), p_tmp.begin() + res_count);
res_count = new_end - p_tmp.begin();
}
else if(join_kind == '2'){ // RIGHT SEMI
thrust::sort(d_res2, d_res2 + res_count);
auto new_end = thrust::unique(d_res2, d_res2 + res_count);
res_count = new_end - d_res2;
auto old_sz = ranj.size();
ranj.resize(ranj.size() + res_count);
thrust::copy(d_res2, d_res2 + res_count, ranj.begin() + old_sz);
thrust::sort(ranj.begin(), ranj.end());
auto ra_cnt = thrust::unique(ranj.begin(), ranj.end());
ranj.resize(ra_cnt-ranj.begin());
}
else if(join_kind == '3'){ // ANTI JOIN LEFT
thrust::counting_iterator<int> iter(0);
thrust::device_vector<int> rr(cnt_l);
auto new_end = thrust::set_difference(iter, iter+cnt_l, p_tmp.begin(), p_tmp.begin() + res_count, rr.begin());
res_count = new_end - rr.begin();
thrust::copy(rr.begin(), new_end, p_tmp.begin());
}
else if(join_kind == '4'){ // ANTI JOIN RIGHT
thrust::sort(d_res2, d_res2 + res_count);
auto new_end = thrust::unique(d_res2, d_res2 + res_count);
auto cnt = new_end - d_res2;
thrust::device_vector<int> seq(cnt + ranj.size());
//auto new_end = thrust::set_difference(seq.begin(), seq.end(), d_res2, d_res2 + res_count, rr.begin());
auto new_end1 = thrust::set_union(d_res2, d_res2 + cnt, ranj.begin(), ranj.end(), seq.begin());
auto s_cnt = new_end1 - seq.begin();
thrust::sort(seq.begin(), seq.begin() + s_cnt);
auto end_seq = thrust::unique(seq.begin(), seq.begin() + s_cnt);
auto u_cnt = end_seq - seq.begin();
ranj.resize(u_cnt);
thrust::copy(seq.begin(), seq.begin() + u_cnt, ranj.begin());
thrust::sort(ranj.begin(), ranj.end());
auto ra_cnt = thrust::unique(ranj.begin(), ranj.end());
ranj.resize(ra_cnt-ranj.begin());
}
tot_count = tot_count + res_count;
//cout << "tot " << tot_count << endl;
//std::clock_t start12 = std::clock();
if(res_count && join_kind != '4' && join_kind != '2') {
offset = c->mRecCount;
queue<string> op_sel1(op_sel_s);
c->resize_join(res_count);
if(scratch.size() < res_count*int_size)
scratch.resize(res_count*int_size);
thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
std::map<string,bool> processed;
while(!op_sel1.empty()) {
if (processed.find(op_sel1.front()) != processed.end()) {
op_sel1.pop();
continue;
}
else
processed[op_sel1.front()] = 1;
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end() && join_kind != '2') {
allocColumns(left, cc);
copyColumns(left, cc, i, k, 0, 0);
//gather
if(left->type[op_sel1.front()] != 1 ) {
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_float[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset);
};
if(op_sel1.front() != colname1)
left->deAllocColumnOnDevice(op_sel1.front());
//};
}
else if(std::find(right->columnNames.begin(), right->columnNames.end(), op_sel1.front()) != right->columnNames.end()) {
//gather
if(right->type[op_sel1.front()] != 1) {
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(d_res2, d_res2 + res_count, right->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(d_res2, d_res2 + res_count, right->d_columns_float[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset);
}
}
else {
};
op_sel1.pop();
};
};
thrust::device_free(d_res1);
thrust::device_free(d_res2);
};
};
if(join_type.front() == '4') {
thrust::device_vector<int> st(cnt_r);
thrust::sequence(st.begin(), st.end(),0,1);
thrust::device_vector<int> r(cnt_r);
auto new_end = thrust::set_difference(st.begin(), st.end(), ranj.begin(), ranj.end(), r.begin());
ranj.resize(0);
res_count = new_end - r.begin();
tot_count = res_count;
queue<string> op_sel1(op_sel_s);
c->resize_join(res_count);
if(scratch.size() < res_count*int_size)
scratch.resize(res_count*int_size);
thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
std::map<string,bool> processed;
while(!op_sel1.empty()) {
if (processed.find(op_sel1.front()) != processed.end()) {
op_sel1.pop();
continue;
}
else
processed[op_sel1.front()] = 1;
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(r.begin(), r.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin());
op_sel1.pop();
};
}
else if(join_type.front() == '2') {
res_count = ranj.size();
tot_count = res_count;
queue<string> op_sel1(op_sel_s);
c->resize_join(res_count);
if(scratch.size() < res_count*int_size)
scratch.resize(res_count*int_size);
thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
std::map<string,bool> processed;
while(!op_sel1.empty()) {
if (processed.find(op_sel1.front()) != processed.end()) {
op_sel1.pop();
continue;
}
else
processed[op_sel1.front()] = 1;
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(ranj.begin(), ranj.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin());
op_sel1.pop();
};
ranj.resize(0);
};
};
left->deAllocOnDevice();
right->deAllocOnDevice();
c->deAllocOnDevice();
varNames[s] = c;
c->mRecCount = tot_count;
c->hostRecCount = tot_count;
c->name = s;
if(verbose)
cout << "tot res " << tot_count << " " << getFreeMem() << endl;
if(right->tmp_table == 1) {
right->free();
varNames.erase(j2);
}
else {
if(stat[j2] == statement_count) {
right->free();
varNames.erase(j2);
};
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
join_type.pop();
if(!join_eq_type.empty())
join_eq_type.pop();
size_t tot_size = tot_count*8*c->columnNames.size();
if (getFreeMem() > tot_size) {
c->maxRecs = tot_count;
c->segCount = 1;
}
else {
c->segCount = ((tot_size)/getFreeMem() + 1);
c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1);
};
if(verbose)
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
}
// Sorts CudaSet `a` entirely on the host and materializes the sorted rows in `b`.
// Chosen by emit_order when the data set does not fit into GPU memory.
//   names     - columns that need to be staged for the sort
//   exe_type  - sort-key column names (stack, applied last-key-first, which
//               makes the overall sort stable with respect to key priority)
//   exe_value - per-key direction strings ("ASC"/"DESC")
void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value)
{
    unsigned int tot = 0;
    if(!a->not_compressed) { //compressed
        // Compressed source: decompress segment by segment (last segment first)
        // and pull each decompressed chunk back to the host arrays.
        allocColumns(a, names);
        unsigned int c = 0;
        size_t cnt = 0;
        for(unsigned int i = 0; i < a->segCount; i++) {
            copyColumns(a, names, (a->segCount - i) - 1, cnt); //uses segment 1 on a host to copy data from a file to gpu
            if (a->mRecCount) {
                // NOTE(review): `c` is never updated, so this offset is
                // (0 - tot) - mRecCount, relying on unsigned wrap-around.
                // Looks suspicious - confirm against CudaSet::CopyToHost's
                // expected offset semantics.
                a->CopyToHost((c - tot) - a->mRecCount, a->mRecCount);
                tot = tot + a->mRecCount;
            };
        };
    }
    else
        tot = a->mRecCount;   // uncompressed: everything is already on the host
    b->resize(tot); //resize host arrays
    a->mRecCount = tot;
    // Build an identity permutation, refine it once per sort key, then apply
    // it to every column while copying the rows into `b`.
    unsigned int* permutation = new unsigned int[a->mRecCount];
    thrust::sequence(permutation, permutation + a->mRecCount);
    size_t maxSize = a->mRecCount;
    char* temp;
    // Scratch buffer sized for the widest column of `a`.
    temp = new char[maxSize*max_char(a)];
    // sort on host
    for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
        // type[] == 0 -> integer/decimal column, 1 -> float, otherwise char.
        if (a->type[exe_type.top()] == 0)
            update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
        else if (a->type[exe_type.top()] == 1)
            update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
        else {
            update_char_permutation(a, exe_type.top(), permutation, exe_value.top(), temp, 1);
        };
    };
    // Gather every column of `a` through the final permutation into `b`.
    for (unsigned int i = 0; i < a->mColumnCount; i++) {
        if (a->type[a->columnNames[i]] != 1) {
            apply_permutation_host(a->h_columns_int[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_int[a->columnNames[i]].data());
        }
        else
            apply_permutation_host(a->h_columns_float[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_float[a->columnNames[i]].data());
    };
    delete [] temp;
    delete [] permutation;
}
// ORDER BY handler. `s` = result variable name, `f` = source variable name,
// `ll` = 0 for a top-level statement (non-zero when invoked as part of a
// larger statement, in which case the statement counter is not advanced).
// `e` is currently unused. Sorts on the GPU when the set fits into half of
// free device memory; otherwise falls back to order_on_host.
void emit_order(const char *s, const char *f, const int e, const int ll)
{
    if(ll == 0)
        statement_count++;
    // Scan pass (first of the two parser passes): only validate the source
    // name and record last-use positions for later lifetime management.
    if (scan_state == 0 && ll == 0) {
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0) {
            process_error(2, "Order : couldn't find variable " + string(f));
        };
        stat[s] = statement_count;
        stat[f] = statement_count;
        if(filter_var.find(f) != filter_var.end())
            stat[filter_var[f]] = statement_count;
        return;
    };
    if (scan_state == 0) {
        check_used_vars();
        return;
    };
    if(varNames.find(f) == varNames.end() ) {
        clean_queues();
        return;
    };
    CudaSet* a = varNames.find(f)->second;
    stack<string> exe_type, exe_value;
    if(verbose)
        cout << "ORDER: " << s << " " << f << endl;
    // Drain the parser queues into (column, direction) stacks. A bare NAME
    // token defaults to ascending order.
    for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
        if ((op_type.front()).compare("NAME") == 0) {
            exe_type.push(op_value.front());
            exe_value.push("ASC");
        }
        else {
            exe_type.push(op_type.front());
            exe_value.push(op_value.front());
        };
        if(std::find(a->columnNames.begin(), a->columnNames.end(), exe_type.top()) == a->columnNames.end()) {
            process_error(2, "Couldn't find name " + exe_type.top());
        };
    };
    // op_vx = sort keys in declaration order (reverse of the stack).
    stack<string> tp(exe_type);
    queue<string> op_vx;
    while (!tp.empty()) {
        op_vx.push(tp.top());
        tp.pop();
    };
    queue<string> names;
    for (unsigned int i = 0; i < a->columnNames.size() ; i++ )
        names.push(a->columnNames[i]);
    CudaSet *b = a->copyDeviceStruct();
    //lets find out if our data set fits into a GPU
    size_t mem_available = getFreeMem();
    size_t rec_size = 0;
    for(unsigned int i = 0; i < a->mColumnCount; i++) {
        if(a->type[a->columnNames[i]] == 0)
            rec_size = rec_size + int_size;
        else if(a->type[a->columnNames[i]] == 1)
            rec_size = rec_size + float_size;
        else
            rec_size = rec_size + a->char_size[a->columnNames[i]];
    };
    bool fits;
    if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU
        fits = 0;
    else fits = 1;
    if(!fits) {
        order_on_host(a, b, names, exe_type, exe_value);
    }
    else {
        // GPU path.
        // initialize permutation to [0, 1, 2, ... ,N-1]
        size_t rcount;
        if(a->filtered) {
            // A filtered set materializes from its source table's rows.
            CudaSet *t = varNames[a->source_name];
            a->mRecCount = t->mRecCount;
            a->hostRecCount = a->mRecCount;
        };
        a->mRecCount = load_queue(names, a, op_vx.front(), rcount, 0, a->segCount);
        // The shared scratch buffer holds the permutation (4 bytes per row).
        // NOTE(review): the size check compares bytes against a row count;
        // the subsequent *4 resize keeps it large enough in practice - verify.
        if(scratch.size() < a->mRecCount)
            scratch.resize(a->mRecCount*4);
        thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
        thrust::sequence(permutation, permutation+(a->mRecCount));
        unsigned int* perm_ptr = thrust::raw_pointer_cast(permutation);
        void* temp;
        // Device scratch sized for the widest column (used by the radix passes).
        CUDA_SAFE_CALL(hipMalloc((void **) &temp, a->mRecCount*max_char(a)));
        if(a->filtered)
            varNames[a->source_name]->hostRecCount = varNames[a->source_name]->mRecCount;
        else
            a->hostRecCount = a->mRecCount;;
        if(a->filtered)
            varNames[a->source_name]->mRecCount = varNames[a->source_name]->hostRecCount;
        else
            a->mRecCount = a->hostRecCount;
        // Refine the permutation per sort key (stack order = least significant
        // key first, so the final order respects key priority).
        for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
            if (a->type[exe_type.top()] == 0 && a->string_map.find(exe_type.top()) == a->string_map.end())
                update_permutation(a->d_columns_int[exe_type.top()], perm_ptr, a->mRecCount, exe_value.top(), (int_type*)temp, 64);
            else if (a->type[exe_type.top()] == 1)
                update_permutation(a->d_columns_float[exe_type.top()], perm_ptr, a->mRecCount,exe_value.top(), (float_type*)temp, 64);
            else {
                //get strings to device
                update_char_permutation(a, exe_type.top(), perm_ptr, exe_value.top(), temp, 0);
            };
        };
        b->resize(a->mRecCount); //resize host arrays
        b->mRecCount = a->mRecCount;
        // Permute every column in place on the device...
        for (unsigned int i = 0; i < a->mColumnCount; i++) {
            if (a->type[a->columnNames[i]] != 1) {
                apply_permutation(a->d_columns_int[a->columnNames[i]], perm_ptr, a->mRecCount, (int_type*)temp, 64);
            }
            else
                apply_permutation(a->d_columns_float[a->columnNames[i]], perm_ptr, a->mRecCount, (float_type*)temp, 64);
        };
        // ...then copy the sorted columns down to b's host arrays.
        for(unsigned int i = 0; i < a->mColumnCount; i++) {
            if(a->type[a->columnNames[i]] != 1) {
                thrust::copy(a->d_columns_int[a->columnNames[i]].begin(), a->d_columns_int[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_int[a->columnNames[i]].begin());
            }
            else
                thrust::copy(a->d_columns_float[a->columnNames[i]].begin(), a->d_columns_float[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_float[a->columnNames[i]].begin());
        };
        b->deAllocOnDevice();
        a->deAllocOnDevice();
        hipFree(temp);
    };
    varNames[s] = b;
    b->segCount = 1;
    b->not_compressed = 1;
    b->string_map = a->string_map;
    // Free the source if this was its last use and it is not pinned.
    if(stat[f] == statement_count && !a->keep) {
        a->free();
        varNames.erase(f);
    };
}
// SELECT handler. `s` = result variable, `f` = source variable,
// `grp_cnt` = number of GROUP BY keys (0 for a plain projection).
// Processes the source segment by segment: each segment is copied to the GPU,
// the select list is evaluated into `b`, and the per-segment results are
// accumulated into `c`, which is finally stored in varNames[s].
//
// Fixes vs. the original:
//  * `c` was used uninitialized when every segment produced zero rows (the
//    copy-back branch dereferenced it); it is now NULL-initialized and the
//    branch is guarded by `c_set`.
//  * the alloced_mem cleanup loop indexed forward while popping from the
//    back, freeing only half of the saved allocations; it now drains the
//    whole vector.
void emit_select(const char *s, const char *f, const int grp_cnt)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate the source, record last uses, and bail out.
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0) {
            process_error(2, "Select : couldn't find variable " + string(f) );
        };
        stat[s] = statement_count;
        stat[f] = statement_count;
        if(filter_var.find(f) != filter_var.end())
            stat[filter_var[f]] = statement_count;
        check_used_vars();
        clean_queues();
        return;
    };
    if(varNames.find(f) == varNames.end()) {
        clean_queues();
        cout << "Couldn't find1 " << f << endl;
        process_error(2, "Couldn't find(1) " + string(f) );
        return;
    };
    // Keep only the trailing grp_cnt entries of op_value: the GROUP BY keys.
    queue<string> op_v1(op_value);
    while(op_v1.size() > grp_cnt)
        op_v1.pop();
    stack<string> op_v2;   // keys as a stack (used for ordering/grouping)
    queue<string> op_v3;   // keys in order (used when merging segment results)
    for(int i=0; i < grp_cnt; ++i) {
        op_v2.push(op_v1.front());
        op_v3.push(op_v1.front());
        op_v1.pop();
    };
    CudaSet *a;
    if(varNames.find(f) != varNames.end())
        a = varNames.find(f)->second;
    else {
        process_error(2, "Couldn't find " + string(f) );
    };
    if(a->mRecCount == 0 && !a->filtered) {
        // Empty, unfiltered source: the result is a trivial empty set.
        CudaSet *c;
        c = new CudaSet(0,1);
        varNames[s] = c;
        c->name = s;
        clean_queues();
        if(verbose)
            cout << "SELECT " << s << " count : 0, Mem " << getFreeMem() << endl;
        return;
    };
    if(verbose)
        cout << "SELECT " << s << " " << f << " " << getFreeMem() << endl;
    std::clock_t start1 = std::clock();
    // here we need to determine the column count and composition
    queue<string> op_v(op_value);
    queue<string> op_vx;           // distinct source columns referenced
    set<string> field_names;
    map<string,string> aliases;    // column -> "AS" alias
    string tt;
    // First pass over op_value: a column name immediately followed by a
    // non-column token means that token is the column's alias.
    while(!op_v.empty()) {
        if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) {
            tt = op_v.front();
            op_v.pop();
            if(!op_v.empty()) {
                if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end()) {
                    if(aliases.count(tt) == 0) {
                        aliases[tt] = op_v.front();
                    };
                }
                else {
                    // Skip over any run of non-column tokens.
                    while(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end() && !op_v.empty()) {
                        op_v.pop();
                    };
                };
            };
        };
        if(!op_v.empty())
            op_v.pop();
    };
    // Second pass: collect the set of referenced source columns.
    op_v = op_value;
    while(!op_v.empty()) {
        if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) {
            field_names.insert(op_v.front());
        };
        op_v.pop();
    };
    for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
        op_vx.push(*it);
    };
    // find out how many columns a new set will have
    queue<string> op_t(op_type);
    int_type col_count = 0;
    for(int i=0; !op_t.empty(); ++i, op_t.pop())
        if((op_t.front()).compare("emit sel_name") == 0)
            col_count++;
    CudaSet *b, *c = NULL;   // c stays NULL until the first non-empty segment
    if(a->segCount <= 1)
        setSegments(a, op_vx);
    allocColumns(a, op_vx);
    unsigned int cycle_count;
    if(a->filtered)
        cycle_count = varNames[a->source_name]->segCount;
    else
        cycle_count = a->segCount;
    size_t ol_count = a->mRecCount, cnt;
    a->hostRecCount = a->mRecCount;
    b = new CudaSet(0, col_count);
    b->name = "tmp b in select";
    bool c_set = 0;
    //size_t tmp_size = a->mRecCount;
    //if(a->segCount > 1)
    //    tmp_size = a->maxRecs;
    vector<thrust::device_vector<int_type> > distinct_val;  //keeps array of DISTINCT values for every key
    vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key
    vector<thrust::device_vector<int_type> > distinct_tmp;
    /*  for(unsigned int i = 0; i < distinct_cnt; i++) {
        distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size));
        distinct_val.push_back(thrust::device_vector<int_type>());
        distinct_hash.push_back(thrust::device_vector<int_type>());
    };
    */
    bool one_liner;
    if (grp_cnt != 0)
        phase_copy = 1;
    for(unsigned int i = 0; i < cycle_count; i++) {          // MAIN CYCLE
        if(verbose)
            cout << "segment " << i << " select mem " << getFreeMem() << endl;
        std::clock_t start3 = std::clock();
        cnt = 0;
        copyColumns(a, op_vx, i, cnt);
        if(a->mRecCount) {
            if (grp_cnt != 0) {
                // Group the segment: sort by the keys unless every key is
                // known to be single-valued in this segment (min == max).
                bool srt = 0;
                stack<string> op_vv(op_v2);
                while(!op_vv.empty()) {
                    if(!min_max_eq[op_vv.top()])
                        srt = 1;
                    op_vv.pop();
                };
                if(srt) {
                    order_inplace(a, op_v2, field_names, 1);
                    a->GroupBy(op_v2);
                }
                else {
                    // One group: mark only the last record as a group boundary.
                    if(a->grp.size() < a->mRecCount)
                        a->grp.resize(a->mRecCount);
                    thrust::fill(a->grp.begin(),a->grp.begin()+a->mRecCount,0);
                    a->grp[a->mRecCount-1] = 1;
                    a->grp_count = 1;
                };
            }
            else
                a->grp_count = 0;
            copyFinalize(a, op_vx,0);
            one_liner = select(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a,b, distinct_tmp);
            if(i == 0)
                std::reverse(b->columnNames.begin(), b->columnNames.end());
            if (!c_set && b->mRecCount > 0) {
                c = new CudaSet(0, col_count);
                create_c(c,b);
                c_set = 1;
                c->name = s;
            };
            if (grp_cnt && cycle_count > 1 && b->mRecCount > 0) {
                // Grouped, multi-segment: merge this segment's groups into c.
                add(c,b,op_v3, aliases, distinct_tmp, distinct_val, distinct_hash, a);
            }
            else if (c_set) {
                // copy b to c (guarded: c exists only after a non-empty segment)
                unsigned int c_offset = c->mRecCount;
                c->resize(b->mRecCount);
                for(unsigned int j=0; j < b->columnNames.size(); j++) {
                    if (b->type[b->columnNames[j]] == 0) {
                        thrust::copy(b->d_columns_int[b->columnNames[j]].begin(), b->d_columns_int[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_int[b->columnNames[j]].begin() + c_offset);
                    }
                    else if (b->type[b->columnNames[j]] == 1) {
                        thrust::copy(b->d_columns_float[b->columnNames[j]].begin(), b->d_columns_float[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_float[b->columnNames[j]].begin() + c_offset);
                    };
                };
            };
            //std::cout<< "add time " <<  ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << '\n';
        };
        std::cout<< "cycle sel time " <<  ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
    };
    phase_copy = 0;
    a->mRecCount = ol_count;
    a->mRecCount = a->hostRecCount;
    a->deAllocOnDevice();
    b->deAllocOnDevice();
    a->grp.resize(0);
    a->grp.shrink_to_fit();
    // Release every temporary allocation saved during the cycle. (The original
    // loop advanced an index while shrinking the vector from the back and so
    // freed only the first half.)
    while(!alloced_mem.empty()) {
        hipFree(alloced_mem.back());
        alloced_mem.pop_back();
    };
    if(!c_set) {
        // No segment produced any rows: publish an empty result set.
        CudaSet *c;
        c = new CudaSet(0,1);
        varNames[s] = c;
        c->name = s;
        clean_queues();
        return;
    };
    // Finalize aggregates (AVG/COUNT DISTINCT) on the accumulated result.
    if (grp_cnt) {
        count_avg(c, distinct_hash);
    }
    else {
        if(one_liner) {
            count_simple(c);
        };
    };
    c->maxRecs = c->mRecCount;
    c->hostRecCount = c->mRecCount;
    c->string_map = b->string_map;
    c->name = s;
    c->keep = 1;
    if(verbose)
        cout << "select res " << c->mRecCount << endl;
    // Re-segment the result if it would not comfortably fit in device memory.
    size_t tot_size = c->maxRecs*8*c->columnNames.size();
    if (getFreeMem() < tot_size*3) {
        c->segCount = ((tot_size*3)/getFreeMem() + 1);
        c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1);
    };
    clean_queues();
    varNames[s] = c;
    b->free();
    varNames[s]->keep = 1;
    if(stat[s] == statement_count) {
        varNames[s]->free();
        varNames.erase(s);
    };
    if(stat[f] == statement_count && a->keep == 0) {
        a->free();
        varNames.erase(f);
    };
    if(verbose)
        std::cout<< "select time " <<  ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
// INSERT handler: appends the rows of variable `s` into table `f`.
// On the scan pass it only validates both names and records their last use.
void emit_insert(const char *f, const char* s) {
    statement_count++;
    if (scan_state == 0) {
        // A name is valid if it is a known variable or a dictionary table.
        auto known = [](const char* name) {
            return stat.find(name) != stat.end() || data_dict.count(name) != 0;
        };
        if (!known(f))
            process_error(2, "Insert : couldn't find variable " + string(f));
        if (!known(s))
            process_error(2, "Insert : couldn't find variable " + string(s) );
        check_used_vars();
        stat[f] = statement_count;
        stat[s] = statement_count;
        clean_queues();
        return;
    };
    // Execution pass: both sides must actually be loaded.
    bool have_f = varNames.find(f) != varNames.end();
    bool have_s = varNames.find(s) != varNames.end();
    if (!have_f || !have_s) {
        clean_queues();
        return;
    };
    if (verbose)
        cout << "INSERT " << f << " " << s << endl;
    insert_records(f, s);
    clean_queues();
};
// DELETE handler: removes the records of `f` matching the queued filter.
// On the scan pass it only validates the name and records its last use.
void emit_delete(const char *f)
{
    statement_count++;
    if (scan_state == 0) {
        bool known = stat.find(f) != stat.end() || data_dict.count(f) != 0;
        if (!known)
            process_error(2, "Delete : couldn't find variable " + string(f));
        stat[f] = statement_count;
        check_used_vars();
        clean_queues();
        return;
    };
    if (varNames.find(f) == varNames.end()) {
        // Nothing loaded under this name - discard the queued expression.
        clean_queues();
        return;
    };
    delete_records(f);
    cout << "DELETE " << f << endl;
    clean_queues();
}
// CASE expression hook: currently only flags that a CASE is being parsed.
// The intended design (per the original notes): extract the relevant values,
// pass them through a modified filter and get a bool vector back - not
// implemented yet.
void emit_case()
{
    op_case = 1;
    if (scan_state == 1)
        cout << "emit case " << endl;
}
// CREATE INDEX handler: persists the key column name for `table` into a
// "<table>.key" sidecar file. `index_name` is currently unused. No-op on the
// scan pass (scan_state == 0).
// Fix vs. original: fopen() failure is now reported instead of dereferencing
// a NULL FILE*.
void emit_create_index(const char *index_name, const char *table, const char *column)
{
    if (scan_state != 0) {
        string s1(table);
        string s3 = s1 + ".key";
        FILE *f = fopen(s3.c_str(), "w");
        if (!f) {
            process_error(2, "Couldn't create file " + s3);
            return;
        };
        fputs(column,f);
        fclose(f);
    };
}
// CREATE INTERVAL handler: persists the interval column pair for `table` into
// "<table>.interval" as "lcolumn|rcolumn". `interval_name` is currently
// unused. No-op on the scan pass (scan_state == 0).
// Fix vs. original: fopen() failure is now reported instead of dereferencing
// a NULL FILE*.
void emit_create_interval(const char *interval_name, const char *table, const char *lcolumn, const char *rcolumn)
{
    if (scan_state != 0) {
        string s1(table);
        string s3 = s1 + ".interval";
        FILE *f = fopen(s3.c_str(), "w");
        if (!f) {
            process_error(2, "Couldn't create file " + s3);
            return;
        };
        fputs(lcolumn,f);
        fputc('|',f);
        fputs(rcolumn,f);
        fclose(f);
    };
}
// CREATE BITMAP INDEX handler: for every segment of the (large) left table,
// maps each left row's foreign key `lid` to the matching row of the (small,
// fully resident) right table via binary search on `rid`, gathers the right
// table's `rcolumn` values through that mapping, and writes them compressed
// to "<ltable>.<rtable>.<rcolumn>.<segment>".
void emit_create_bitmap_index(const char *index_name, const char *ltable, const char *rtable, const char *rcolumn, const char *lid, const char *rid)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: register the referenced columns and pin both tables
        // (max statement count = never auto-freed).
        emit_name(rcolumn);
        emit_sel_name(rcolumn);
        emit_name(lid);
        emit_name(rid);
        check_used_vars();
        stat[rtable] = std::numeric_limits<unsigned int>::max();
        stat[ltable] = std::numeric_limits<unsigned int>::max();
    }
    else {
        cout << ltable << " " << rtable << " " << rid << " " << lid << endl;
        emit_name(rcolumn);
        emit_sel_name(rcolumn);
        emit_name(lid);
        emit_name(rid);
        check_used_vars();
        if(varNames.find(ltable) == varNames.end())
            cout << "Couldn't find " << ltable << endl;
        if(varNames.find(rtable) == varNames.end())
            cout << "Couldn't find " << rtable << endl;
        CudaSet* left = varNames.find(ltable)->second;
        CudaSet* right = varNames.find(rtable)->second;
        // Bring the right table's key and payload columns to the GPU
        // (segment 0 only - the right table is assumed to be one segment).
        queue<string> op_vx;
        op_vx.push(rcolumn);op_vx.push(rid);
        allocColumns(right, op_vx);
        right->CopyColumnToGpu(rid, 0, 0);
        right->CopyColumnToGpu(rcolumn, 0, 0);
        op_vx.pop();op_vx.pop();
        op_vx.push(lid);
        allocColumns(left, op_vx);
        for(int i = 0; i < left->segCount; i++) {
            left->CopyColumnToGpu(lid, i, 0);
            // output[j] = position of left key j in the sorted right key column.
            thrust::device_vector<unsigned int> output(left->mRecCount);
            thrust::lower_bound(right->d_columns_int[rid].begin(), right->d_columns_int[rid].begin() + right->mRecCount,
                                left->d_columns_int[lid].begin(), left->d_columns_int[lid].begin() + left->mRecCount, output.begin());
            string str = std::string(ltable) + std::string(".") + std::string(rtable) + std::string(".") + std::string(rcolumn) + std::string(".") + to_string(i);
            thrust::device_vector<int_type> res(left->mRecCount);
            thrust::host_vector<int_type> res_h(left->mRecCount);
            if(right->type[rcolumn] == 0) {
                // Integer/decimal payload: gather directly and compress.
                thrust::gather(output.begin(), output.begin() + left->mRecCount, right->d_columns_int[rcolumn].begin() , res.begin());
                thrust::copy(res.begin(), res.begin() + left->mRecCount, res_h.begin());
                compress_int(str, res_h);
            }
            else if(right->type[rcolumn] == 1) {
                // Float payload: not supported (silently skipped).
            }
            else { //strings
                // String payload: gather the precomputed 8-byte string hashes
                // from the sidecar ".hash" file instead of the raw strings.
                string f1 = right->load_file_name + "." + rcolumn + ".0.hash"; //need to change it in case if there are dimensions tables larger than 1 segment ?
                // NOTE(review): fopen/fread results are unchecked here - a
                // missing or truncated hash file would crash or produce
                // garbage. Consider adding error handling.
                FILE* f = fopen(f1.c_str(), "rb" );
                unsigned int cnt;
                fread(&cnt, 4, 1, f);
                if(res_h.size() < cnt)
                    res_h.resize(cnt);
                if(res.size() < cnt)
                    res.resize(cnt);
                fread(res_h.data(), cnt*8, 1, f);
                res = res_h;
                fclose(f);
                thrust::device_vector<int_type> output1(left->mRecCount);
                thrust::gather(output.begin(), output.begin() + left->mRecCount ,
                               res.begin(), output1.begin());
                thrust::copy(output1.begin(), output1.begin() + left->mRecCount, res_h.begin());
                compress_int(str, res_h);
            };
        };
    };
}
// DISPLAY handler: prints the contents of variable `f` to stdout, honoring an
// optional LIMIT queued by emit_limit. `sep` is currently unused.
void emit_display(const char *f, const char* sep)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate and record the last use of `f` (and of the
        // variable a filter over `f` was derived from, if any).
        bool known = stat.find(f) != stat.end() || data_dict.count(f) != 0;
        if (!known)
            process_error(2, "Filter : couldn't find variable " + string(f) );
        stat[f] = statement_count;
        auto fv = filter_var.find(f);
        if (fv != filter_var.end())
            stat[fv->second] = statement_count;
        clean_queues();
        return;
    };
    auto pos = varNames.find(f);
    if (pos == varNames.end()) {
        clean_queues();
        return;
    };
    CudaSet* tbl = pos->second;
    // Pull the optional LIMIT off the number queue (0 = no limit).
    int limit = 0;
    if (!op_nums.empty()) {
        limit = op_nums.front();
        op_nums.pop();
    };
    tbl->Display(limit, 0, 1);
    clean_queues();
    if (stat[f] == statement_count && tbl->keep == 0) {
        tbl->free();
        varNames.erase(f);
    };
}
// FILTER handler: creates variable `s` as a lazily-filtered view of `f`.
// The filter expression currently sitting on the op_* queues is saved inside
// the new CudaSet (fil_*) and is only evaluated when the data is actually
// materialized. A filter applied on top of an already-filtered set is merged
// by concatenating the two saved postfix expressions and AND-ing them.
void emit_filter(char *s, char *f)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate, record last uses, and remember that `s` is a
        // filter derived from `f`.
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0) {
            process_error(1, "Filter : couldn't find variable " + string(f));
        };
        stat[s] = statement_count;
        stat[f] = statement_count;
        filter_var[s] = f;
        // check possible use of other variables in filters
        // (a "table.column" token pins `table` as used by this statement)
        queue<string> op(op_value);
        while(!op.empty()) {
            size_t pos1 = op.front().find_first_of(".", 0);
            if(pos1 != string::npos) {
                stat[op.front().substr(0,pos1)] = statement_count;
            };
            op.pop();
        };
        check_used_vars();
        clean_queues();
        return;
    };
    CudaSet *a, *b;
    a = varNames.find(f)->second;
    a->name = f;
    if(a->mRecCount == 0 && !a->filtered) {
        // Empty, unfiltered source: the result is a trivial empty set.
        b = new CudaSet(0,1);
    }
    else {
        if(verbose)
            cout << "INLINE FILTER " << f << endl;
        b = a->copyDeviceStruct();
        b->name = s;
        b->sorted_fields = a->sorted_fields;
        b->presorted_fields = a->presorted_fields;
        //save the stack (the filter expression is evaluated lazily later)
        b->fil_s = s;
        b->fil_f = f;
        b->fil_type = op_type;
        b->fil_value = op_value;
        b->fil_nums = op_nums;
        b->fil_nums_f = op_nums_f;
        b->fil_nums_precision = op_nums_precision;
        b->filtered = 1;
        b->tmp_table = a->tmp_table;
        b->string_map = a->string_map;
        if(a->filtered) {
            // Filter over a filter: point `b` at the original source and
            // append `a`'s saved expression to `b`'s, joined with an AND.
            // Note this drains `a`'s saved queues in the process.
            b->source_name = a->source_name;
            b->fil_f = a->fil_f;
            while(!a->fil_value.empty()) {
                b->fil_value.push(a->fil_value.front());
                a->fil_value.pop();
            };
            while(!a->fil_type.empty()) {
                b->fil_type.push(a->fil_type.front());
                a->fil_type.pop();
            };
            b->fil_type.push("AND");
            while(!a->fil_nums.empty()) {
                b->fil_nums.push(a->fil_nums.front());
                a->fil_nums.pop();
            };
            while(!a->fil_nums_precision.empty()) {
                b->fil_nums_precision.push(a->fil_nums_precision.front());
                a->fil_nums_precision.pop();
            };
            while(!a->fil_nums_f.empty()) {
                b->fil_nums_f.push(a->fil_nums_f.front());
                a->fil_nums_f.pop();
            };
            a->filtered = 0;
            varNames.erase(f);
        }
        else
            b->source_name = f;
        b->maxRecs = a->maxRecs;
        b->prm_d.resize(a->maxRecs);
    };
    b->hostRecCount = a->hostRecCount;
    clean_queues();
    // Replace any previous binding of `s`.
    if (varNames.count(s) > 0)
        varNames[s]->free();
    varNames[s] = b;
    if(stat[s] == statement_count) {
        // `s` is never referenced again - free it immediately.
        b->free();
        varNames.erase(s);
    };
}
// STORE handler: writes CudaSet `s` to a delimited text file `f` using
// separator `sep`, honoring an optional LIMIT queued by emit_limit.
void emit_store(const char *s, const char *f, const char* sep)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate and record the last use.
        bool known = stat.find(s) != stat.end() || data_dict.count(s) != 0;
        if (!known)
            process_error(2, "Store : couldn't find variable " + string(s) );
        stat[s] = statement_count;
        auto fv = filter_var.find(f);
        if (fv != filter_var.end())
            stat[fv->second] = statement_count;
        clean_queues();
        return;
    };
    auto pos = varNames.find(s);
    if (pos == varNames.end())
        return;
    CudaSet* tbl = pos->second;
    if (verbose)
        cout << "STORE: " << s << " " << f << " " << sep << endl;
    // Pull the optional LIMIT off the number queue (0 = no limit).
    int limit = 0;
    if (!op_nums.empty()) {
        limit = op_nums.front();
        op_nums.pop();
    };
    tbl->Store(f, sep, limit, 0, 0);
    if (stat[s] == statement_count && tbl->keep == 0) {
        tbl->free();
        varNames.erase(s);
    };
};
// STORE ... BINARY handler: writes CudaSet `s` to compressed binary segment
// files under the name `f`. When the set is backed by a text file that has
// not been fully loaded, the source is streamed chunk by chunk and each chunk
// is stored in turn, so arbitrarily large files can be converted.
void emit_store_binary(const char *s, const char *f, const bool append)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate and record last uses.
        if (stat.find(s) == stat.end() && data_dict.count(s) == 0) {
            process_error(2, "Store : couldn't find variable " + string(s));
        };
        stat[s] = statement_count;
        if(filter_var.find(f) != filter_var.end())
            stat[filter_var[f]] = statement_count;
        clean_queues();
        return;
    };
    cout << "Append " << append << endl;
    if(varNames.find(s) == varNames.end())
        return;
    CudaSet* a = varNames.find(s)->second;
    if(stat[f] == statement_count)
        a->deAllocOnDevice();
    printf("STORE: %s %s \n", s, f);
    // Optional LIMIT queued by emit_limit (0 = no limit).
    int limit = 0;
    if(!op_nums.empty()) {
        limit = op_nums.front();
        op_nums.pop();
    };
    total_count = 0;
    total_segments = 0;
    a->maxRecs = 0;
    if(fact_file_loaded) {
        // Data is fully resident: a single store call suffices.
        a->Store(f,"", limit, 1, append);
    }
    else {
        // Stream the source text file: load a chunk, store it, repeat until
        // LoadBigFile reports that the whole file has been consumed.
        FILE* file_p;
        if(a->text_source) {
            file_p = fopen(a->load_file_name.c_str(), "rb");
            if (!file_p) {
                process_error(2, "Could not open file " + a->load_file_name );
            };
        };
        // Reusable device buffers for the text parser.
        thrust::device_vector<char> d_readbuff;
        thrust::device_vector<char*> dest(a->mColumnCount);
        thrust::device_vector<unsigned int> ind(a->mColumnCount);
        thrust::device_vector<unsigned int> dest_len(a->mColumnCount);
        while(!fact_file_loaded) {
            if(verbose)
                cout << "LOADING " << a->load_file_name << " mem: " << getFreeMem() << endl;
            if(a->text_source)
                fact_file_loaded = a->LoadBigFile(file_p, d_readbuff, dest, ind, dest_len);
            if(a->maxRecs < a->mRecCount)
                a->maxRecs = a->mRecCount;   // track the largest chunk seen
            a->Store(f,"", limit, 1, append);
        };
    };
    a->writeSortHeader(f);
    if(stat[f] == statement_count && !a->keep) {
        a->free();
        varNames.erase(s);
    };
};
// LOAD ... BINARY handler: registers a binary table `s` backed by files named
// "<f>.<column>.<segment>". Reads "<f>.<firstcol>.header" to learn the total
// record count, segment count and maximum segment size, then creates the
// CudaSet from the previously queued column descriptions (namevars/typevars/
// sizevars/cols). `d` is currently unused.
// Fix vs. original: the three fread() results are now checked, so a truncated
// header raises an error instead of leaving the counts uninitialized.
void emit_load_binary(const char *s, const char *f, const int d)
{
    statement_count++;
    if (scan_state == 0) {
        stat[s] = statement_count;
        return;
    };
    if(verbose)
        printf("BINARY LOAD: %s %s \n", s, f);
    std::clock_t start1 = std::clock();
    CudaSet *a;
    unsigned int segCount, maxRecs;
    string f1(f);
    f1 += "." + namevars.front() + ".header";
    FILE* ff = fopen(f1.c_str(), "rb");
    if(!ff) {
        process_error(2, "Couldn't open file " + f1);
        return;
    };
    // Header layout: 8-byte total record count, 4-byte segment count,
    // 4-byte max records per segment.
    size_t totRecs;
    if (fread((char *)&totRecs, 8, 1, ff) != 1 ||
        fread((char *)&segCount, 4, 1, ff) != 1 ||
        fread((char *)&maxRecs, 4, 1, ff) != 1) {
        fclose(ff);
        process_error(2, "Couldn't read header " + f1);
        return;
    };
    fclose(ff);
    if(verbose)
        cout << "Reading " << totRecs << " records" << endl;
    a = new CudaSet(namevars, typevars, sizevars, cols, totRecs, f, maxRecs);
    a->segCount = segCount;
    a->keep = true;
    a->name = s;
    varNames[s] = a;
    if(stat[s] == statement_count ) {
        // Never referenced again - discard immediately.
        a->free();
        varNames.erase(s);
    };
    std::cout<< "load time " <<  ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem()  << '\n';
}
// LOAD handler: registers a text-file-backed table `s` read from file `f`
// with field separator `sep`. `d` is only echoed in the log line.
void emit_load(const char *s, const char *f, const int d, const char* sep)
{
    statement_count++;
    if (scan_state == 0) {
        stat[s] = statement_count;
        return;
    };
    printf("LOAD: %s %s %d %s \n", s, f, d, sep);
    // Build the set from the previously queued column descriptions.
    CudaSet *tbl = new CudaSet(namevars, typevars, sizevars, cols, process_count);
    tbl->keep = true;
    tbl->not_compressed = 1;
    tbl->load_file_name = f;
    tbl->separator = sep;
    varNames[s] = tbl;
    fact_file_loaded = 0;   // actual parsing happens lazily, chunk by chunk
    if (stat[s] == statement_count) {
        // Never referenced again - discard immediately.
        tbl->free();
        varNames.erase(s);
    };
}
// SHOW TABLES handler: prints every table name in the data dictionary.
// Active only on the execution pass (scan_state == 1).
void emit_show_tables()
{
    if (scan_state == 1) {
        for (auto it = data_dict.begin(); it != data_dict.end(); ++it)
            cout << it->first << endl;
    };
    return;
}
// DROP TABLE handler: deletes every on-disk artifact of `table_name`
// (per-column segment files, ".header", ".presort" and ".sort" sidecars) and
// removes it from the data dictionary. Active only on the execution pass.
// Fix vs. original: the ".presort"/".sort" removal dereferenced the lookup
// iterator even when the table was not found (i.e. dereferencing end() -
// undefined behavior); all file removal is now guarded by a successful find.
void emit_drop_table(const char* table_name)
{
    if (scan_state == 1) {
        map<string, map<string, col_data> >::iterator iter;
        if((iter = data_dict.find(table_name)) != data_dict.end()) {
            auto s = (*iter).second;
            for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
                // Remove segment files until remove() fails (no more segments).
                int seg = 0;
                string f_name = (*iter).first + "." + (*it).first + "." + to_string(seg);
                while(!remove(f_name.c_str())) {
                    seg++;
                    f_name = (*iter).first + "." + (*it).first + "." + to_string(seg);
                };
                f_name = (*iter).first + "." + (*it).first + ".header";
                remove(f_name.c_str());
            };
            string s_name = (*iter).first + ".presort";
            remove(s_name.c_str());
            s_name = (*iter).first + ".sort";
            remove(s_name.c_str());
            data_dict.erase(iter);
        };
        save_dict = 1;   // dictionary changed (or drop attempted): persist it
    };
    return;
}
// DESCRIBE handler: prints a human-readable schema of `table_name` from the
// data dictionary. Column type codes: 0 = integer family (plain integer,
// decimal with a precision, or timestamp when the length is UINT_MAX),
// 1 = float, 3 = decimal, anything else = fixed-width char.
void emit_describe_table(const char* table_name)
{
    if (scan_state != 1)
        return;
    auto iter = data_dict.find(table_name);
    if (iter == data_dict.end())
        return;
    for (auto it = iter->second.begin(); it != iter->second.end(); ++it) {
        const string &name = it->first;
        const auto &cd = it->second;
        if (cd.col_type == 0) {
            if (cd.col_length) {
                if (cd.col_length != UINT_MAX)
                    cout << name << " decimal with precision of " << cd.col_length << endl;
                else
                    cout << name << " timestamp" << endl;
            }
            else
                cout << name << " integer" << endl;
        }
        else if (cd.col_type == 1)
            cout << name << " float" << endl;
        else if (cd.col_type == 3)
            cout << name << " decimal" << endl;
        else
            cout << name << " char(" << cd.col_length << ")" << endl;
    };
    return;
}
// Parser error hook called by the bison-generated parser.
// Prints the line number and the offending token, then forwards the message
// to the registered error callback with severity 1 (warning).
// NOTE(review): declared variadic but the extra arguments are never formatted
// into `s` (no vsnprintf) - if `s` contains printf-style placeholders they
// are passed through verbatim. Confirm callers only pass plain strings.
void yyerror(char *s, ...)
{
    extern int yylineno;   // provided by the generated lexer
    extern char *yytext;   // current token text
    fprintf(stderr, "%d: error: ", yylineno);
    cout << yytext << endl;
    error_cb(1, s);
}
// Resets every piece of parser accumulation state so the next statement
// starts from a clean slate. Assigning a default-constructed container is
// equivalent to popping every element.
void clean_queues()
{
    op_type = decltype(op_type)();
    op_value = decltype(op_value)();
    op_join = decltype(op_join)();
    op_nums = decltype(op_nums)();
    op_nums_f = decltype(op_nums_f)();
    op_nums_precision = decltype(op_nums_precision)();
    j_col_count = decltype(j_col_count)();
    namevars = decltype(namevars)();
    typevars = decltype(typevars)();
    sizevars = decltype(sizevars)();
    cols = decltype(cols)();
    op_sort = decltype(op_sort)();
    op_presort = decltype(op_presort)();
    join_type = decltype(join_type)();
    join_eq_type = decltype(join_eq_type)();
    // Scalar counters and flags.
    op_case = 0;
    sel_count = 0;
    join_cnt = 0;
    join_col_cnt = 0;
    distinct_cnt = 0;
    join_tab_cnt = 0;
    tab_cnt = 0;
    join_and_cnt.clear();
}
// For every table referenced by the parsed statements (used_vars), rebuilds
// the namevars/typevars/sizevars/cols description queues from the data
// dictionary and issues a binary load, so each referenced table is registered
// before execution starts. Tables not mentioned in `stat` are skipped.
void load_vars()
{
    if(used_vars.size() == 0) {
        // No referenced columns found; treated as a no-op.
        //cout << "Error, no valid column names have been found " << endl;
        //exit(0);
    }
    else {
        for (auto it=used_vars.begin(); it != used_vars.end(); ++it ) {
            // Start each table with empty description queues.
            while(!namevars.empty()) namevars.pop();
            while(!typevars.empty()) typevars.pop();
            while(!sizevars.empty()) sizevars.pop();
            while(!cols.empty()) cols.pop();
            if(stat.count((*it).first) != 0) {
                auto c = (*it).second;
                for (auto sit=c.begin() ; sit != c.end(); ++sit ) {
                    //cout << "name " << (*sit).first << " " << data_dict[(*it).first][(*sit).first].col_length << endl;
                    namevars.push((*sit).first);
                    // Map dictionary type codes to type-name strings:
                    // col_type 0 with length 0 -> int, length UINT_MAX ->
                    // timestamp, other length -> decimal; col_type 1 -> float;
                    // everything else -> char.
                    if(data_dict[(*it).first][(*sit).first].col_type == 0) {
                        if(data_dict[(*it).first][(*sit).first].col_length == 0) {
                            typevars.push("int");
                        }
                        else {
                            if(data_dict[(*it).first][(*sit).first].col_length == UINT_MAX)
                                typevars.push("timestamp");
                            else
                                typevars.push("decimal");
                        }
                    }
                    else if(data_dict[(*it).first][(*sit).first].col_type == 1)
                        typevars.push("float");
                    else typevars.push("char");
                    sizevars.push(data_dict[(*it).first][(*sit).first].col_length);
                    cols.push(0);
                };
                // Register the table under its own name.
                emit_load_binary((*it).first.c_str(), (*it).first.c_str(), 0);
            };
        };
    };
}
// Prefixes `err` with a severity tag and forwards it to the C error callback.
// Severity 1 = warning, 2 = fatal, anything else = aborting.
void process_error(int severity, string err) {
    const char* tag;
    switch (severity) {
    case 1:
        tag = "(Warning) ";
        break;
    case 2:
        tag = "(Fatal) ";
        break;
    default:
        tag = "(Aborting) ";
        break;
    }
    err = tag + err;
    error_cb(severity, err.c_str()); // send the error to the c based callback
}
void alenkaInit(char ** av)
{
process_count = 1000000000;
verbose = 0;
scan_state = 1;
statement_count = 0;
clean_queues();
//context = CreateCudaDevice(0, nullptr, true);
}
void alenkaClose()
{
statement_count = 0;
if(alloced_sz) {
hipFree(alloced_tmp);
alloced_sz = 0;
};
}
| 5b006e5a252c8944d9fd933b29bc40a2cfc1ec41.cu | #include "operators.h"
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/set_operations.h>
#include "moderngpu/src/moderngpu/kernel_join.hxx"
struct is_even
{
__host__ __device__
bool operator()(const int &x)
{
return (x % 2) == 0;
}
};
using namespace mgpu;
using namespace std;
using namespace thrust::placeholders;
size_t int_size = sizeof(int_type);
size_t float_size = sizeof(float_type);
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
unsigned int distinct_cnt = 0;
unsigned int join_col_cnt = 0;
unsigned int join_tab_cnt = 0;
unsigned int tab_cnt = 0;
queue<string> op_join;
queue<char> join_type;
queue<char> join_eq_type;
unsigned int partition_count;
map<string,unsigned int> stat;
map<unsigned int, unsigned int> join_and_cnt;
map<string, map<string, bool> > used_vars;
bool save_dict = 0;
thrust::device_vector<unsigned char> scratch;
map<string, string> filter_var;
thrust::device_vector<int> ranj;
unsigned long long int currtime;
standard_context_t context;
void check_used_vars()
{
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
auto s = (*it).second;
auto vars(op_value);
while(!vars.empty()) {
if(s.count(vars.front()) != 0) {
used_vars[(*it).first][vars.front()] = 1;
};
vars.pop();
}
};
}
void emit_name(const char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(const int val)
{
op_nums.push(val);
}
void emit_string(const char *str)
{ // remove the float_type quotes
if(str[0] == '"') {
string sss(str,1, strlen(str)-2);
op_value.push(sss);
}
else {
string sss(str);
op_value.push(sss);
};
op_type.push("STRING");
}
void emit_string_grp(const char *str, const char *str_grp)
{
emit_string(str);
grp_val = str_grp;
};
void emit_fieldname(const char* name1, const char* name2)
{
string s1(name1);
string s2(name2);
op_type.push("FIELD");
op_value.push(s1 + "." + s2);
};
void emit_number(const int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
op_nums_precision.push(0);
}
void emit_float(const float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(const char* str)
{
op_type.push("NUMBER");
string s1(str);
unsigned int precision;
auto pos = s1.find(".");
if(pos == std::string::npos)
precision = 0;
else {
precision = (s1.length() - pos) -1;
s1.erase(pos,1);
};
op_nums.push(stoi(s1));
op_nums_precision.push(precision);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
unsigned int misses = 0;
void emit_and()
{
op_type.push("AND");
join_col_cnt++;
}
void emit_eq()
{
op_type.push("JOIN");
join_eq_type.push('E');
if(misses == 0) {
join_and_cnt[tab_cnt] = join_col_cnt;
misses = join_col_cnt;
join_col_cnt = 0;
tab_cnt++;
}
else {
misses--;
}
}
void emit_neq()
{
op_type.push("JOIN");
join_eq_type.push('N');
if(misses == 0) {
join_and_cnt[tab_cnt] = join_col_cnt;
misses = join_col_cnt;
join_col_cnt = 0;
tab_cnt++;
}
else {
misses--;
}
}
void emit_distinct()
{
op_type.push("DISTINCT");
distinct_cnt++;
}
void emit_year()
{
op_type.push("YEAR");
}
void emit_month()
{
op_type.push("MONTH");
}
void emit_day()
{
op_type.push("DAY");
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(const char *s, ...)
{
}
void emit_var(const char *s, const int c, const char *f, const char* ref, const char* ref_name)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(const char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(const char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_sort(const char *s, const int p)
{
op_sort.push(s);
partition_count = p;
}
void emit_presort(const char *s)
{
op_presort.push(s);
}
void emit_varchar(const char *s, const int c, const char *f, const int d, const char *ref, const char* ref_name)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_vardecimal(const char *s, const int c, const char *f, const int scale, const int precision)
{
namevars.push(s);
typevars.push(f);
sizevars.push(precision);
cols.push(c);
}
void emit_sel_name(const char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(const char *s, const char tp)
{
op_join.push(s);
join_tab_cnt++;
join_type.push(tp);
};
void order_inplace_host(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str)
{
unsigned int* permutation = new unsigned int[a->mRecCount];
thrust::sequence(permutation, permutation + a->mRecCount);
char* temp = new char[a->mRecCount*max_char(a)];
stack<string> exe_type1(exe_type), exe_value;
while(!exe_type1.empty()) {
exe_value.push("ASC");
exe_type1.pop();
};
// sort on host
for(;!exe_type.empty(); exe_type.pop(),exe_value.pop()) {
if (a->type[exe_type.top()] != 1)
update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
else
update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
};
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
if (a->type[*it] != 1) {
thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_int[*it].data(), (int_type*)temp);
thrust::copy((int_type*)temp, (int_type*)temp + a->mRecCount, a->h_columns_int[*it].data());
}
else {
thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_float[*it].data(), (float_type*)temp);
thrust::copy((float_type*)temp, (float_type*)temp + a->mRecCount, a->h_columns_float[*it].data());
}
};
delete [] temp;
delete [] permutation;
}
void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str)
{
if(scratch.size() < a->mRecCount*4)
scratch.resize(a->mRecCount*4);
thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::sequence(permutation, permutation+a->mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
if(a->grp.size() < a->mRecCount*8)
a->grp.resize(a->mRecCount*8);
unsigned int bits;
for(; !exe_type.empty(); exe_type.pop()) {
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[exe_type.top()];
if (a->type[exe_type.top()] != 1) {
update_permutation(a->d_columns_int[exe_type.top()], raw_ptr, a->mRecCount, "ASC", (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
}
else
update_permutation(a->d_columns_float[exe_type.top()], raw_ptr, a->mRecCount,"ASC", (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
};
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[*it];
if (a->type[*it] != 1) {
apply_permutation(a->d_columns_int[*it], raw_ptr, a->mRecCount, (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
}
else {
apply_permutation(a->d_columns_float[*it], raw_ptr, a->mRecCount, (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits);
};
};
}
bool check_star_join(const string j1)
{
auto op_vals(op_value);
for(auto i=0; i < sel_count; i++) {
op_vals.pop();
op_vals.pop();
};
if(join_tab_cnt > 0) {
while(op_vals.size()) {
if (std::find(varNames[j1]->columnNames.begin(), varNames[j1]->columnNames.end(), op_vals.front()) != varNames[j1]->columnNames.end()) {
op_vals.pop();
op_vals.pop();
}
else {
return 0;
};
};
if(join_tab_cnt == 1) {
if(!check_bitmap_file_exist(varNames[j1], varNames[op_join.front()])) {
return 0;
};
};
return 1;
}
else
return 0;
}
void star_join(const char *s, const string j1)
{
map<string,bool> already_copied;
queue<string> op_left;
CudaSet* left = varNames.find(j1)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(auto i=0; i < sel_count; i++) {
if(std::find(left->columnNames.begin(), left->columnNames.end(), op_value.front()) != left->columnNames.end())
op_left.push(op_value.front());
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
auto op_sel_s(op_sel), op_sel_s_as(op_sel_as), op_g(op_value);
CudaSet* c = new CudaSet(op_sel_s, op_sel_s_as);
string f1, f2;
map<string, string> key_map;
map<string, char> sort_map;
map<string, string> r_map;
for(auto i = 0; i < join_tab_cnt; i++) {
f1 = op_g.front();
op_g.pop();
f2 = op_g.front();
op_g.pop();
r_map[f1] = f2;
queue<string> op_jj(op_join);
for(auto z = 0; z < (join_tab_cnt-1) - i; z++)
op_jj.pop();
size_t rcount;
queue<string> op_vd(op_g), op_alt(op_sel);
unsigned int jc = join_col_cnt;
while(jc) {
jc--;
op_vd.pop();
op_alt.push(op_vd.front());
op_vd.pop();
};
key_map[op_jj.front()] = f1;
CudaSet* right = varNames.find(op_jj.front())->second;
if(!check_bitmaps_exist(left, right)) {
cout << "Required bitmap on table " << op_jj.front() << " doesn't exists" << endl;
exit(0);
};
queue<string> second;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front()) != 0 && std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
second.push(op_alt.front());
//cout << "col " << op_alt.front() << " " << op_jj.front() << endl;
op_left.push(f1);
};
op_alt.pop();
};
if(!second.empty()) {
right->filtered = 0;
right->mRecCount = right->maxRecs;
load_queue(second, right, "", rcount, 0, right->segCount, 0,0); // put all used columns into GPU
};
};
queue<string> idx;
set<string> already_loaded;
bool right_cpy = 0;
for (unsigned int i = 0; i < left->segCount; i++) {
std::clock_t start2 = std::clock();
if(verbose)
cout << "segment " << i << " " << getFreeMem() << endl;
idx = left->fil_value;
already_loaded.clear();
while(!idx.empty()) {
//load the index
if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) {
//extract table name and colname from index name
already_loaded.insert(idx.front());
size_t pos1 = idx.front().find_first_of(".", 0);
size_t pos2 = idx.front().find_first_of(".", pos1+1);
CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second;
char a;
//cout << "loading index " << idx.front() << endl;
a = left->loadIndex(idx.front(), i);
sort_map[idx.front().substr(pos1+1, pos2-pos1-1)] = a;
};
idx.pop();
};
left->filtered = 0;
size_t cnt_c = 0;
allocColumns(left, left->fil_value);
copyColumns(left, left->fil_value, i, cnt_c);
bool* res = filter(left->fil_type, left->fil_value, left->fil_nums, left->fil_nums_f, left->fil_nums_precision, left, i);
thrust::device_ptr<bool> star((bool*)res);
size_t cnt = thrust::count(star, star + (unsigned int)left->mRecCount, 1);
//cout << "join res " << cnt << " out of " << left->mRecCount << endl;
thrust::host_vector<unsigned int> prm_vh(cnt);
thrust::device_vector<unsigned int> prm_v(cnt);
thrust::host_vector<unsigned int> prm_tmp(cnt);
thrust::device_vector<unsigned int> prm_tmp_d(cnt);
//std::cout<< "seg filter " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
if(cnt) { //gather
//start1 = std::clock();
left->prm_d.resize(cnt);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)left->mRecCount-1),
star, left->prm_d.begin(), thrust::identity<bool>());
thrust::device_free(star);
prm_vh = left->prm_d;
size_t offset = c->mRecCount;
c->resize_join(cnt);
queue<string> op_sel1(op_sel_s);
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, cnt*max_char(c)));
cudaMemset(temp,0,cnt*max_char(c));
CudaSet *t;
unsigned int cnt1, bits;
int_type lower_val;
thrust::device_vector<unsigned int> output(cnt);
//std::cout<< "seg start " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
while(!op_sel1.empty()) {
if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end()) {
if(left->filtered)
t = varNames[left->source_name];
else
t = left;
if(left->type[op_sel1.front()] <= 1) {
if(ssd && !interactive) {
//start1 = std::clock();
lower_val = t->readSsdSegmentsFromFile(i, op_sel1.front(), offset, prm_vh, c);
//std::cout<< "SSD L SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
}
else {
t->readSegmentsFromFile(i, op_sel1.front());
void* h;
if(!interactive) {
if(left->type[op_sel1.front()] == 0)
h = t->h_columns_int[op_sel1.front()].data();
else
h = t->h_columns_float[op_sel1.front()].data();
}
else {
string ff = t->load_file_name + "." + op_sel1.front()+ "." + to_string(i);
h = buffers[ff];
};
cnt1 = ((unsigned int*)h)[0];//bytes
lower_val = ((int_type*)(((unsigned int*)h)+1))[0];
bits = ((unsigned int*)((char*)h + cnt1))[8];
//cout << cnt1 << " " << lower_val << " " << bits << " " << left->type[op_sel1.front()] << endl;
if(bits == 8) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), ptr + offset);
};
}
else if(bits == 16) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), ptr + offset);
};
}
else if(bits == 32) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), ptr + offset);
}
}
else if(bits == 64) {
if(left->type[op_sel1.front()] == 0) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), ptr + offset);
};
};
};
if(left->type[op_sel1.front()] != 1)
thrust::transform( c->h_columns_int[op_sel1.front()].begin() + offset, c->h_columns_int[op_sel1.front()].begin() + offset + cnt,
thrust::make_constant_iterator(lower_val), c->h_columns_int[op_sel1.front()].begin() + offset, thrust::plus<int_type>());
else {
int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data();
thrust::transform(ptr + offset, ptr + offset + cnt,
thrust::make_constant_iterator(lower_val), ptr + offset, thrust::plus<int_type>());
thrust::transform(ptr + offset, ptr + offset + cnt, c->h_columns_float[op_sel1.front()].begin() + offset, long_to_float());
};
}
else { //gather string. There are no strings in fact tables.
};
}
else {
for(auto it = key_map.begin(); it != key_map.end(); it++) {
CudaSet* r = varNames.find(it->first)->second;
if(std::find(r->columnNames.begin(), r->columnNames.end(), op_sel1.front()) != r->columnNames.end()) {
if(i == 0) {
if(data_dict[varNames[it->first]->load_file_name][op_sel1.front()].col_type == 2) {
//cout << "SET " << op_sel1.front() << " to " << varNames[it->first]->load_file_name + "." + op_sel1.front() << endl;
c->string_map[op_sel1.front()] = varNames[it->first]->load_file_name + "." + op_sel1.front();
};
}
if(left->filtered)
t = varNames[left->source_name];
else
t = left;
if(ssd && !interactive) {
//start1 = std::clock();
lower_val = t->readSsdSegmentsFromFileR(i, key_map[it->first], prm_vh, prm_tmp);
//std::cout<< "SSD R SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
}
else {
t->readSegmentsFromFile(i, key_map[it->first]);
void* h;
if(!interactive) {
h = t->h_columns_int[key_map[it->first]].data();
}
else {
string ff = t->load_file_name + "." + key_map[it->first] + "." + to_string(i);
h = buffers[ff];
};
cnt1 = ((unsigned int*)h)[0];
lower_val = ((int_type*)(((unsigned int*)h)+1))[0];
bits = ((unsigned int*)((char*)h + cnt1))[8];
//cout << cnt1 << " " << lower_val << " " << bits << endl;
if(bits == 8) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), prm_tmp.begin());
}
else if(bits == 16) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), prm_tmp.begin());
}
else if(bits == 32) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), prm_tmp.begin());
}
else if(bits == 64) {
thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), prm_tmp.begin());
};
};
if(lower_val != 1)
thrust::transform(prm_tmp.begin(), prm_tmp.end(), thrust::make_constant_iterator(lower_val-1), prm_tmp.begin(), thrust::plus<unsigned int>());
if(sort_map[r->source_name] == '1') { // sorted consecutive starting with 1 dimension keys
prm_tmp_d = prm_tmp;
//cout << "PATH 1 " << endl;
}
else {
//cout << "PATH 2 " << r->source_name << endl;
output = prm_tmp;
if(r->d_columns_int[r_map[key_map[it->first]]].size() == 0) {
r->d_columns_int[r_map[key_map[it->first]]].resize(r->maxRecs);
};
if(right_cpy == 0) {
r->CopyColumnToGpu(r_map[key_map[it->first]]);
};
thrust::lower_bound(r->d_columns_int[r_map[key_map[it->first]]].begin(), r->d_columns_int[r_map[key_map[it->first]]].end(),
output.begin(), output.end(),
prm_tmp_d.begin());
};
if(r->type[op_sel1.front()] != 1) {
thrust::device_ptr<int_type> d_tmp((int_type*)temp);
thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_tmp((float_type*)temp);
thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_float[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_float[op_sel1.front()].begin() + offset);
};
break;
};
};
};
op_sel1.pop();
//std::cout<< ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl;
};
cudaFree(temp);
right_cpy = 1;
};
//std::cout<< "SEG " << i << " " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
//unload the segment indexes :
idx = left->fil_value;
already_loaded.clear();
while(!idx.empty()) {
if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) {
//extract table name and colname from index name
already_loaded.insert(idx.front());
size_t pos1 = idx.front().find_first_of(".", 0);
size_t pos2 = idx.front().find_first_of(".", pos1+1);
CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second;
string f1 = idx.front() + "." + to_string(i);
auto it = index_buffers.find(f1);
if(it != index_buffers.end()) {
cudaFreeHost(index_buffers[f1]);
index_buffers.erase(it);
};
};
idx.pop();
};
};
//if(verbose)
// std::cout<< "star join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
while(!op_join.empty()) {
varNames[op_join.front()]->deAllocOnDevice();
op_join.pop();
};
varNames[s] = c;
c->maxRecs = c->mRecCount;
if(verbose)
cout << endl << "join count " << c->mRecCount << endl;
};
void emit_join(const char *s, const char *j1, const int grp, const int start_seg, const int end_seg)
{
//cout << "emit_join " << s << " " << join_tab_cnt << " " << op_join.front() << endl;
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end() && data_dict.count(j1) == 0) {
process_error(2, "Join : couldn't find variable " + string(j1) );
};
if (stat.find(op_join.front()) == stat.end() && data_dict.count(op_join.front()) == 0) {
process_error(2, "Join : couldn't find variable " + op_join.front() );
};
stat[s] = statement_count;
stat[j1] = statement_count;
if(filter_var.find(j1) != filter_var.end()) {
stat[filter_var[j1]] = statement_count;
};
check_used_vars();
while(!op_join.empty()) {
stat[op_join.front()] = statement_count;
if(filter_var.find(op_join.front()) != filter_var.end()) {
stat[filter_var[op_join.front()]] = statement_count;
};
op_join.pop();
};
return;
};
queue<string> op_m(op_value);
if(check_star_join(j1)) {
if(verbose)
cout << "executing star join !! " << endl;
star_join(s, j1);
}
else {
if(join_tab_cnt > 1) {
string tab_name;
for(unsigned int i = 1; i <= join_tab_cnt; i++) {
if(i == join_tab_cnt)
tab_name = s;
else
tab_name = s + to_string(i);
string j, j2;
if(i == 1) {
j2 = op_join.front();
op_join.pop();
j = op_join.front();
op_join.pop();
}
else {
if(!op_join.empty()) {
j = op_join.front();
op_join.pop();
}
else
j = j1;
j2 = s + to_string(i-1);
};
emit_multijoin(tab_name, j, j2, i, s, start_seg, end_seg);
op_value = op_m;
};
}
else {
emit_multijoin(s, j1, op_join.front(), 1, s, start_seg, end_seg);
op_join.pop();
};
};
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_m.front());
op_m.pop();
op_sel_as.push(op_m.front());
op_m.pop();
};
while(!op_sel_as.empty()) {
//cout << "alias " << op_sel.front() << " : " << op_sel_as.front() << endl;
if(op_sel.front() != op_sel_as.front()) {
if(varNames[s]->type[op_sel.front()] == 0) {
varNames[s]->h_columns_int[op_sel_as.front()] = varNames[s]->h_columns_int[op_sel.front()];
varNames[s]->h_columns_int.erase(op_sel.front());
varNames[s]->d_columns_int[op_sel_as.front()] = varNames[s]->d_columns_int[op_sel.front()];
varNames[s]->d_columns_int.erase(op_sel.front());
varNames[s]->type[op_sel_as.front()] = 0;
varNames[s]->type.erase(op_sel.front());
}
else if(varNames[s]->type[op_sel.front()] == 1) {
varNames[s]->h_columns_float[op_sel_as.front()] = varNames[s]->h_columns_float[op_sel.front()];
varNames[s]->h_columns_float.erase(op_sel.front());
varNames[s]->d_columns_float[op_sel_as.front()] = varNames[s]->d_columns_float[op_sel.front()];
varNames[s]->d_columns_float.erase(op_sel.front());
varNames[s]->type[op_sel_as.front()] = 1;
varNames[s]->type.erase(op_sel.front());
varNames[s]->decimal.erase(op_sel.front());
}
else {
varNames[s]->h_columns_char[op_sel_as.front()] = varNames[s]->h_columns_char[op_sel.front()];
varNames[s]->h_columns_char.erase(op_sel.front());
varNames[s]->d_columns_char[op_sel_as.front()] = varNames[s]->d_columns_char[op_sel.front()];
varNames[s]->d_columns_char.erase(op_sel.front());
varNames[s]->type[op_sel_as.front()] = 2;
varNames[s]->type.erase(op_sel.front());
varNames[s]->char_size[op_sel_as.front()] = varNames[s]->char_size[op_sel.front()];
varNames[s]->char_size.erase(op_sel.front());
};
varNames[s]->decimal[op_sel_as.front()] = varNames[s]->decimal[op_sel.front()];
auto it = std::find(varNames[s]->columnNames.begin(), varNames[s]->columnNames.end(), op_sel.front());
*it = op_sel_as.front();
};
op_sel_as.pop();
op_sel.pop();
};
clean_queues();
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(op_join.size()) {
if(stat[op_join.front()] == statement_count && op_join.front().compare(j1) != 0) {
varNames[op_join.front()]->free();
varNames.erase(op_join.front());
};
};
}
template<typename T, typename P>
void p_gather(thrust::host_vector<int>& h_tmp, T* h, P* dest)
{
for(int i = 0; i < h_tmp.size(); i++) {
dest[i] = h[h_tmp[i]];
};
};
void emit_multijoin(const string s, const string j1, const string j2, const unsigned int tab, const char* res_name, const int start_segment, const int end_segment)
{
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
if(varNames.find(j1) == varNames.end())
cout << "Couldn't find j1 " << j1 << endl;
if(varNames.find(j2) == varNames.end())
cout << "Couldn't find j2 " << j2 << " here " << endl;
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
queue<string> op_sel_s(op_sel);
queue<string> op_sel_s_as(op_sel_as);
queue<string> op_g(op_value);
if(tab > 0) {
for(unsigned int z = 0; z < join_tab_cnt - tab; z++) {
for(unsigned int j = 0; j < join_and_cnt[z]*2 + 2; j++) {
op_sel_s.push(op_g.front());
op_sel_s_as.push(op_g.front());
op_g.pop();
};
};
};
string f1 = op_g.front();
op_g.pop();
string f2 = op_g.front();
op_g.pop();
if (verbose)
cout << "JOIN " << s << " " << f1 << " " << f2 << " " << getFreeMem() << " " << phase_copy << endl;
std::clock_t start1 = std::clock();
CudaSet* c = new CudaSet(right, left, op_sel_s, op_sel_s_as);
if ((left->mRecCount == 0 && !left->filtered) || (right->mRecCount == 0 && !right->filtered)) {
c = new CudaSet(left, right, op_sel_s, op_sel_s_as);
varNames[res_name] = c;
clean_queues();
return;
};
if(join_tab_cnt > 1 && tab < join_tab_cnt)
c->tmp_table = 1;
else
c->tmp_table = 0;
string colname1, colname2;
string tmpstr;
if (std::find(left->columnNames.begin(), left->columnNames.end(), f1) != left->columnNames.end()) {
colname1 = f1;
if (std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
colname2 = f2;
}
else {
process_error(2, "Couldn't find column " + f2 );
};
}
else if (std::find(right->columnNames.begin(), right->columnNames.end(), f1) != right->columnNames.end()) {
colname2 = f1;
tmpstr = f1;
f1 = f2;
if (std::find(left->columnNames.begin(), left->columnNames.end(), f2) != left->columnNames.end()) {
colname1 = f2;
f2 = tmpstr;
}
else {
process_error(2, "Couldn't find column " +f2 );
};
}
else {
process_error(2, "Couldn't find column " + f1);
};
if (!((left->type[colname1] == 0 && right->type[colname2] == 0) || (left->type[colname1] == 2 && right->type[colname2] == 2)
|| (left->type[colname1] == 1 && right->type[colname2] == 1 && left->decimal[colname1] && right->decimal[colname2]))) {
process_error(2, "Joins on floats are not supported ");
};
//bool decimal_join = 0;
//if (left->type[colname1] == 1 && right->type[colname2] == 1)
// decimal_join = 1;
queue<string> op_vd(op_g);
queue<string> op_g1(op_g);
queue<string> op_alt(op_sel);
unsigned int jc = join_and_cnt[join_tab_cnt - tab];
while(jc) {
jc--;
op_vd.pop();
op_alt.push(op_vd.front());
op_vd.pop();
};
size_t rcount = 0, cnt_r;
queue<string> cc;
if (left->type[colname1] == 2) {
left->d_columns_int[colname1] = thrust::device_vector<int_type>();
}
else {
cc.push(f1);
allocColumns(left, cc);
};
left->hostRecCount = left->mRecCount;
size_t cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
queue<string> lc(cc);
thrust::device_vector<unsigned int> v_l(left->maxRecs);
//MGPU_MEM(int) aIndicesDevice, bIndicesDevice, intersectionDevice;
stack<string> exe_type;
set<string> field_names;
exe_type.push(f2);
for(unsigned int i = 0; i < right->columnNames.size(); i++) {
if (std::find(c->columnNames.begin(), c->columnNames.end(), right->columnNames[i]) != c->columnNames.end() || right->columnNames[i] == f2 || join_and_cnt[join_tab_cnt - tab]) {
field_names.insert(right->columnNames[i]);
};
};
thrust::device_vector<int> p_tmp;
unsigned int start_part = 0;
bool prejoin = 0;
while(start_part < right->segCount) {
right->deAllocOnDevice();
std::clock_t start12 = std::clock();
if(right->not_compressed || (!right->filtered && getFreeMem() < right->columnNames.size()*right->hostRecCount*8*2)) {
cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, start_part+1);
start_part = start_part+1;
}
else {
cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, right->segCount);
start_part = right->segCount;
for(unsigned int i=0; i < right->columnNames.size(); i++) {
if (right->type[right->columnNames[i]] != 1) {
right->d_columns_int[right->columnNames[i]].shrink_to_fit();
}
else
right->d_columns_float[right->columnNames[i]].shrink_to_fit();
};
};
right->mRecCount = cnt_r;
bool order = 1;
if(!right->presorted_fields.empty() && right->presorted_fields.front() == f2) {
order = 0;
//cout << "No need to sort " << endl;
if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size())
right->sort_check = '1';
else {
right->sort_check = '0';
};
};
if(order) {
if(thrust::is_sorted(right->d_columns_int[f2].begin(), right->d_columns_int[f2].end())) {
if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) {
right->sort_check = '1';
}
else {
right->sort_check = '0';
};
}
else {
//cout << "sorting " << endl;
size_t tot_size = right->mRecCount*8*right->columnNames.size();
if (getFreeMem() > tot_size*1.5) {
order_inplace(right, exe_type, field_names, 0);
}
else {
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
//cout << "sorting " << *it << endl;
if(right->type[*it] != 1) {
if(right->h_columns_int[*it].size() < right->mRecCount)
right->h_columns_int[*it].resize(right->mRecCount);
thrust::copy(right->d_columns_int[*it].begin(), right->d_columns_int[*it].begin() + right->mRecCount, right->h_columns_int[*it].begin());
}
else {
if(right->type[*it] == 1) {
if(right->h_columns_float[*it].size() < right->mRecCount)
right->h_columns_float[*it].resize(right->mRecCount);
};
thrust::copy(right->d_columns_float[*it].begin(), right->d_columns_float[*it].begin() + right->mRecCount, right->h_columns_float[*it].begin());
};
};
order_inplace_host(right, exe_type, field_names, 0);
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
if(right->type[*it] != 1)
thrust::copy(right->h_columns_int[*it].begin(), right->h_columns_int[*it].begin() + right->mRecCount, right->d_columns_int[*it].begin());
else
thrust::copy(right->h_columns_float[*it].begin(), right->h_columns_float[*it].begin() + right->mRecCount, right->d_columns_float[*it].begin());
};
};
};
};
//std::cout<< "join right load time " << ( ( std::clock() - start12 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
int e_segment;
if(end_segment == -1) {
e_segment = left->segCount;
}
else
e_segment = end_segment;
for (unsigned int i = start_segment; i < e_segment; i++) {
if(verbose)
//cout << "segment " << i << '\xd';
cout << "segment " << i << endl;
cnt_l = 0;
copyColumns(left, lc, i, cnt_l);
cnt_l = left->mRecCount;
auto join_eq_type1(join_eq_type);
if (cnt_l) {
// sort the left index column, save the permutation vector, it might be needed later
thrust::device_ptr<int_type> d_col((int_type*)thrust::raw_pointer_cast(left->d_columns_int[colname1].data()));
thrust::sequence(v_l.begin(), v_l.begin() + cnt_l,0,1);
bool do_sort = 1;
if(!left->sorted_fields.empty()) {
if(left->sorted_fields.front() == f1) {
do_sort = 0;
};
}
else if(!left->presorted_fields.empty()) {
if(left->presorted_fields.front() == f1) {
do_sort = 0;
};
};
if(do_sort) {
thrust::sort_by_key(d_col, d_col + cnt_l, v_l.begin());
}
else if(verbose)
cout << "No need of sorting " << endl;
if(prejoin) {
//res_count = SetOpKeys<MgpuSetOpIntersection, true>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
// thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
// &intersectionDevice, *context, false);
//if(!res_count)
// continue;
};
if (left->d_columns_int[colname1][0] > right->d_columns_int[colname2][cnt_r-1] ||
left->d_columns_int[colname1][cnt_l-1] < right->d_columns_int[colname2][0]) {
if(verbose)
cout << endl << "skipping after copying " << endl;
continue;
};
//else
// cout << "JOINING " << left->d_columns_int[colname1][0] << ":" << left->d_columns_int[colname1][cnt_l-1] << " AND " << right->d_columns_int[colname2][0] << ":" << right->d_columns_int[colname2][cnt_r-1] << endl;
//cout << "joining " << left->d_columns_int[colname1][0] << " : " << left->d_columns_int[colname1][cnt_l-1] << " and " << right->d_columns_int[colname2][0] << " : " << right->d_columns_int[colname2][cnt_r-1] << endl;
char join_kind = join_type.front();
std::clock_t start11 = std::clock();
mem_t<int2> res;
if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') {
//res_count = RelationalJoin<MgpuJoinKindInner>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
// thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
// &aIndicesDevice, &bIndicesDevice,
// mgpu::less<int_type>(), *context);
res = inner_join(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, less_t<int_type>(), context);
};
res_count = res.size();
/* else if(join_kind == 'L')
res_count = RelationalJoin<MgpuJoinKindLeft>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
else if(join_kind == 'R')
res_count = RelationalJoin<MgpuJoinKindRight>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
else if(join_kind == 'O')
res_count = RelationalJoin<MgpuJoinKindOuter>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l,
thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
*/
if(verbose)
std::cout<< "join time " << ( ( std::clock() - start11 ) / (double)CLOCKS_PER_SEC ) << '\n';
if(verbose)
cout << "RES " << res_count << endl;
if(res_count == 0)
prejoin = 1;
thrust::device_ptr<int> d_res1 = thrust::device_malloc<int>(res_count);
thrust::device_ptr<int> d_res2 = thrust::device_malloc<int>(res_count);
thrust::counting_iterator<unsigned int> begin(0);
split_int2 ff(thrust::raw_pointer_cast(d_res1), thrust::raw_pointer_cast(d_res2), res.data());
thrust::for_each(begin, begin + res_count, ff);
if(res_count) {
p_tmp.resize(res_count);
thrust::sequence(p_tmp.begin(), p_tmp.end(),-1);
thrust::gather_if(d_res1, d_res1+res_count, d_res1, v_l.begin(), p_tmp.begin(), _1 >= 0);
};
// check if the join is a multicolumn join
unsigned int mul_cnt = join_and_cnt[join_tab_cnt - tab];
while(mul_cnt) {
mul_cnt--;
queue<string> mult(op_g);
string f3 = mult.front();
mult.pop();
string f4 = mult.front();
mult.pop();
//cout << "ADDITIONAL COL JOIN " << f3 << " " << f4 << " " << join_eq_type.front() << endl;
queue<string> rc;
rc.push(f3);
allocColumns(left, rc);
size_t offset = 0;
copyColumns(left, rc, i, offset, 0, 0);
rc.pop();
if (res_count) {
thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(res_count);
if(right->d_columns_int[f4].size() == 0)
load_queue(rc, right, f4, rcount, 0, right->segCount, 0, 0);
if (left->type[f3] == 1 && right->type[f4] == 1) {
thrust::transform(make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.begin()),
make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.end()),
make_permutation_iterator(right->d_columns_float[f4].begin(), d_res2),
d_add, float_equal_to());
}
else {
if(join_eq_type1.front() != 'N')
thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()),
make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()),
make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2),
d_add, thrust::equal_to<int_type>());
else {
thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()),
make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()),
make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2),
d_add, thrust::not_equal_to<int_type>());
};
};
if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') { // result count changes only in case of an inner join
unsigned int new_cnt = thrust::count(d_add, d_add+res_count, 1);
thrust::stable_partition(d_res2, d_res2 + res_count, d_add, thrust::identity<unsigned int>());
thrust::stable_partition(p_tmp.begin(), p_tmp.end(), d_add, thrust::identity<unsigned int>());
res_count = new_cnt;
}
else { //otherwise we consider it a valid left join result with non-nulls on the left side and nulls on the right side
thrust::transform(d_res2, d_res2 + res_count, d_add , d_res2, set_minus());
};
thrust::device_free(d_add);
};
if(!join_eq_type1.empty())
join_eq_type1.pop();
};
while(!join_eq_type1.empty())
join_eq_type1.pop();
//cout << "MUL res_count " << res_count << endl;
if(join_kind == '1') { //LEFT SEMI
thrust::sort(p_tmp.begin(), p_tmp.begin() + res_count);
auto new_end = thrust::unique(p_tmp.begin(), p_tmp.begin() + res_count);
res_count = new_end - p_tmp.begin();
}
else if(join_kind == '2'){ // RIGHT SEMI
thrust::sort(d_res2, d_res2 + res_count);
auto new_end = thrust::unique(d_res2, d_res2 + res_count);
res_count = new_end - d_res2;
auto old_sz = ranj.size();
ranj.resize(ranj.size() + res_count);
thrust::copy(d_res2, d_res2 + res_count, ranj.begin() + old_sz);
thrust::sort(ranj.begin(), ranj.end());
auto ra_cnt = thrust::unique(ranj.begin(), ranj.end());
ranj.resize(ra_cnt-ranj.begin());
}
else if(join_kind == '3'){ // ANTI JOIN LEFT
thrust::counting_iterator<int> iter(0);
thrust::device_vector<int> rr(cnt_l);
auto new_end = thrust::set_difference(iter, iter+cnt_l, p_tmp.begin(), p_tmp.begin() + res_count, rr.begin());
res_count = new_end - rr.begin();
thrust::copy(rr.begin(), new_end, p_tmp.begin());
}
else if(join_kind == '4'){ // ANTI JOIN RIGHT
thrust::sort(d_res2, d_res2 + res_count);
auto new_end = thrust::unique(d_res2, d_res2 + res_count);
auto cnt = new_end - d_res2;
thrust::device_vector<int> seq(cnt + ranj.size());
//auto new_end = thrust::set_difference(seq.begin(), seq.end(), d_res2, d_res2 + res_count, rr.begin());
auto new_end1 = thrust::set_union(d_res2, d_res2 + cnt, ranj.begin(), ranj.end(), seq.begin());
auto s_cnt = new_end1 - seq.begin();
thrust::sort(seq.begin(), seq.begin() + s_cnt);
auto end_seq = thrust::unique(seq.begin(), seq.begin() + s_cnt);
auto u_cnt = end_seq - seq.begin();
ranj.resize(u_cnt);
thrust::copy(seq.begin(), seq.begin() + u_cnt, ranj.begin());
thrust::sort(ranj.begin(), ranj.end());
auto ra_cnt = thrust::unique(ranj.begin(), ranj.end());
ranj.resize(ra_cnt-ranj.begin());
}
tot_count = tot_count + res_count;
//cout << "tot " << tot_count << endl;
//std::clock_t start12 = std::clock();
if(res_count && join_kind != '4' && join_kind != '2') {
offset = c->mRecCount;
queue<string> op_sel1(op_sel_s);
c->resize_join(res_count);
if(scratch.size() < res_count*int_size)
scratch.resize(res_count*int_size);
thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
std::map<string,bool> processed;
while(!op_sel1.empty()) {
if (processed.find(op_sel1.front()) != processed.end()) {
op_sel1.pop();
continue;
}
else
processed[op_sel1.front()] = 1;
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end() && join_kind != '2') {
allocColumns(left, cc);
copyColumns(left, cc, i, k, 0, 0);
//gather
if(left->type[op_sel1.front()] != 1 ) {
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_float[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset);
};
if(op_sel1.front() != colname1)
left->deAllocColumnOnDevice(op_sel1.front());
//};
}
else if(std::find(right->columnNames.begin(), right->columnNames.end(), op_sel1.front()) != right->columnNames.end()) {
//gather
if(right->type[op_sel1.front()] != 1) {
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(d_res2, d_res2 + res_count, right->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(d_res2, d_res2 + res_count, right->d_columns_float[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset);
}
}
else {
};
op_sel1.pop();
};
};
thrust::device_free(d_res1);
thrust::device_free(d_res2);
};
};
if(join_type.front() == '4') {
thrust::device_vector<int> st(cnt_r);
thrust::sequence(st.begin(), st.end(),0,1);
thrust::device_vector<int> r(cnt_r);
auto new_end = thrust::set_difference(st.begin(), st.end(), ranj.begin(), ranj.end(), r.begin());
ranj.resize(0);
res_count = new_end - r.begin();
tot_count = res_count;
queue<string> op_sel1(op_sel_s);
c->resize_join(res_count);
if(scratch.size() < res_count*int_size)
scratch.resize(res_count*int_size);
thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
std::map<string,bool> processed;
while(!op_sel1.empty()) {
if (processed.find(op_sel1.front()) != processed.end()) {
op_sel1.pop();
continue;
}
else
processed[op_sel1.front()] = 1;
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(r.begin(), r.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin());
op_sel1.pop();
};
}
else if(join_type.front() == '2') {
res_count = ranj.size();
tot_count = res_count;
queue<string> op_sel1(op_sel_s);
c->resize_join(res_count);
if(scratch.size() < res_count*int_size)
scratch.resize(res_count*int_size);
thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0);
std::map<string,bool> processed;
while(!op_sel1.empty()) {
if (processed.find(op_sel1.front()) != processed.end()) {
op_sel1.pop();
continue;
}
else
processed[op_sel1.front()] = 1;
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::gather(ranj.begin(), ranj.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin());
op_sel1.pop();
};
ranj.resize(0);
};
};
left->deAllocOnDevice();
right->deAllocOnDevice();
c->deAllocOnDevice();
varNames[s] = c;
c->mRecCount = tot_count;
c->hostRecCount = tot_count;
c->name = s;
if(verbose)
cout << "tot res " << tot_count << " " << getFreeMem() << endl;
if(right->tmp_table == 1) {
right->free();
varNames.erase(j2);
}
else {
if(stat[j2] == statement_count) {
right->free();
varNames.erase(j2);
};
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
join_type.pop();
if(!join_eq_type.empty())
join_eq_type.pop();
size_t tot_size = tot_count*8*c->columnNames.size();
if (getFreeMem() > tot_size) {
c->maxRecs = tot_count;
c->segCount = 1;
}
else {
c->segCount = ((tot_size)/getFreeMem() + 1);
c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1);
};
if(verbose)
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl;
}
// Sorts dataset *a entirely on the host and writes the permuted rows into *b.
// Used by emit_order() when the data does not fit into GPU memory.
//   a        - source dataset (may be compressed; decompressed segment-by-segment below)
//   b        - destination dataset; resized to the total row count
//   names    - all column names of a (needed for segment copying)
//   exe_type - sort-key column names (top of stack = least significant key)
//   exe_value - matching sort directions ("ASC"/"DESC")
void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value)
{
unsigned int tot = 0;
if(!a->not_compressed) { //compressed
// Decompress every segment to the GPU, then pull it back to host arrays.
allocColumns(a, names);
unsigned int c = 0;
size_t cnt = 0;
// Segments are processed in reverse order (segCount-1 down to 0).
for(unsigned int i = 0; i < a->segCount; i++) {
copyColumns(a, names, (a->segCount - i) - 1, cnt); //uses segment 1 on a host to copy data from a file to gpu
if (a->mRecCount) {
// NOTE(review): 'c' is initialized to 0 and never updated, so this
// unsigned offset expression wraps around for tot > 0 — confirm the
// intended destination offset against CopyToHost's contract.
a->CopyToHost((c - tot) - a->mRecCount, a->mRecCount);
tot = tot + a->mRecCount;
};
};
}
else
tot = a->mRecCount;
b->resize(tot); //resize host arrays
a->mRecCount = tot;
// Identity permutation [0..N-1]; each sort key refines it below.
unsigned int* permutation = new unsigned int[a->mRecCount];
thrust::sequence(permutation, permutation + a->mRecCount);
size_t maxSize = a->mRecCount;
// Scratch buffer large enough for the widest column of a.
char* temp;
temp = new char[maxSize*max_char(a)];
// sort on host
// Keys are applied from least- to most-significant (stable updates), so the
// final permutation reflects the full multi-column ordering.
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
if (a->type[exe_type.top()] == 0)
update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp);
else if (a->type[exe_type.top()] == 1)
update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
update_char_permutation(a, exe_type.top(), permutation, exe_value.top(), temp, 1);
};
};
// Apply the final permutation to every column, writing into b's host arrays.
// type != 1 covers both integer and string-dictionary columns.
for (unsigned int i = 0; i < a->mColumnCount; i++) {
if (a->type[a->columnNames[i]] != 1) {
apply_permutation_host(a->h_columns_int[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_int[a->columnNames[i]].data());
}
else
apply_permutation_host(a->h_columns_float[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_float[a->columnNames[i]].data());
};
delete [] temp;
delete [] permutation;
}
// ORDER BY: materializes dataset s as dataset f sorted on the key columns held
// in the global op_type/op_value queues.
//   s  - name of the result dataset
//   f  - name of the source dataset
//   e  - unused here (kept for parser interface compatibility)
//   ll - when non-zero, suppresses statement counting (nested invocation)
void emit_order(const char *s, const char *f, const int e, const int ll)
{
if(ll == 0)
statement_count++;
// Scan pass: only record variable lifetimes, no execution.
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end() && data_dict.count(f) == 0) {
process_error(2, "Order : couldn't find variable " + string(f));
};
stat[s] = statement_count;
stat[f] = statement_count;
if(filter_var.find(f) != filter_var.end())
stat[filter_var[f]] = statement_count;
return;
};
if (scan_state == 0) {
check_used_vars();
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
stack<string> exe_type, exe_value;
if(verbose)
cout << "ORDER: " << s << " " << f << endl;
// Drain the parser queues into (column, direction) stacks; a bare NAME token
// defaults to ascending order.
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
if(std::find(a->columnNames.begin(), a->columnNames.end(), exe_type.top()) == a->columnNames.end()) {
process_error(2, "Couldn't find name " + exe_type.top());
};
};
// op_vx: sort keys in reverse stack order, used to pick a load column below.
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
tp.pop();
};
queue<string> names;
for (unsigned int i = 0; i < a->columnNames.size() ; i++ )
names.push(a->columnNames[i]);
CudaSet *b = a->copyDeviceStruct();
//lets find out if our data set fits into a GPU
size_t mem_available = getFreeMem();
size_t rec_size = 0;
// Estimate bytes per record by summing per-column widths.
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[a->columnNames[i]] == 0)
rec_size = rec_size + int_size;
else if(a->type[a->columnNames[i]] == 1)
rec_size = rec_size + float_size;
else
rec_size = rec_size + a->char_size[a->columnNames[i]];
};
bool fits;
if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU
fits = 0;
else fits = 1;
if(!fits) {
// Fall back to the CPU sort path.
order_on_host(a, b, names, exe_type, exe_value);
}
else {
// GPU path: build a permutation on the device and apply it to all columns.
// initialize permutation to [0, 1, 2, ... ,N-1]
size_t rcount;
if(a->filtered) {
// Filtered sets borrow their record count from the underlying source.
CudaSet *t = varNames[a->source_name];
a->mRecCount = t->mRecCount;
a->hostRecCount = a->mRecCount;
};
a->mRecCount = load_queue(names, a, op_vx.front(), rcount, 0, a->segCount);
if(scratch.size() < a->mRecCount)
scratch.resize(a->mRecCount*4);
thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* perm_ptr = thrust::raw_pointer_cast(permutation);
// Device scratch sized for the widest column of a.
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, a->mRecCount*max_char(a)));
if(a->filtered)
varNames[a->source_name]->hostRecCount = varNames[a->source_name]->mRecCount;
else
a->hostRecCount = a->mRecCount;;
if(a->filtered)
varNames[a->source_name]->mRecCount = varNames[a->source_name]->hostRecCount;
else
a->mRecCount = a->hostRecCount;
// Refine the permutation per key, least- to most-significant.
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
if (a->type[exe_type.top()] == 0 && a->string_map.find(exe_type.top()) == a->string_map.end())
update_permutation(a->d_columns_int[exe_type.top()], perm_ptr, a->mRecCount, exe_value.top(), (int_type*)temp, 64);
else if (a->type[exe_type.top()] == 1)
update_permutation(a->d_columns_float[exe_type.top()], perm_ptr, a->mRecCount,exe_value.top(), (float_type*)temp, 64);
else {
//get strings to device
update_char_permutation(a, exe_type.top(), perm_ptr, exe_value.top(), temp, 0);
};
};
b->resize(a->mRecCount); //resize host arrays
b->mRecCount = a->mRecCount;
// Reorder a's device columns in place according to the permutation.
for (unsigned int i = 0; i < a->mColumnCount; i++) {
if (a->type[a->columnNames[i]] != 1) {
apply_permutation(a->d_columns_int[a->columnNames[i]], perm_ptr, a->mRecCount, (int_type*)temp, 64);
}
else
apply_permutation(a->d_columns_float[a->columnNames[i]], perm_ptr, a->mRecCount, (float_type*)temp, 64);
};
// Copy the sorted device columns into b's host arrays.
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[a->columnNames[i]] != 1) {
thrust::copy(a->d_columns_int[a->columnNames[i]].begin(), a->d_columns_int[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_int[a->columnNames[i]].begin());
}
else
thrust::copy(a->d_columns_float[a->columnNames[i]].begin(), a->d_columns_float[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_float[a->columnNames[i]].begin());
};
b->deAllocOnDevice();
a->deAllocOnDevice();
cudaFree(temp);
};
// Publish the sorted set under name s; it now lives as one uncompressed segment.
varNames[s] = b;
b->segCount = 1;
b->not_compressed = 1;
b->string_map = a->string_map;
// Free the source if this was its last use and it is not pinned.
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
// SELECT: evaluates the parsed select list (op_type/op_value/op_nums globals)
// against dataset f, optionally grouping on the last grp_cnt parsed values,
// and publishes the result as dataset s.
//   s       - name of the result dataset
//   f       - name of the source dataset
//   grp_cnt - number of GROUP BY key columns (0 = no grouping)
void emit_select(const char *s, const char *f, const int grp_cnt)
{
statement_count++;
// Scan pass: record variable lifetimes only.
if (scan_state == 0) {
if (stat.find(f) == stat.end() && data_dict.count(f) == 0) {
process_error(2, "Select : couldn't find variable " + string(f) );
};
stat[s] = statement_count;
stat[f] = statement_count;
if(filter_var.find(f) != filter_var.end())
stat[filter_var[f]] = statement_count;
check_used_vars();
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
cout << "Couldn't find1 " << f << endl;
process_error(2, "Couldn't find(1) " + string(f) );
return;
};
// The trailing grp_cnt entries of op_value are the GROUP BY keys:
// op_v2 (stack) is used for sorting, op_v3 (queue) for aggregation merging.
queue<string> op_v1(op_value);
while(op_v1.size() > grp_cnt)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < grp_cnt; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
if(varNames.find(f) != varNames.end())
a = varNames.find(f)->second;
else {
process_error(2, "Couldn't find " + string(f) );
};
// Empty unfiltered source: publish an empty result and bail out.
if(a->mRecCount == 0 && !a->filtered) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
c->name = s;
clean_queues();
if(verbose)
cout << "SELECT " << s << " count : 0, Mem " << getFreeMem() << endl;
return;
};
if(verbose)
cout << "SELECT " << s << " " << f << " " << getFreeMem() << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
// Walk op_value to collect source columns referenced (field_names) and
// "column AS alias" pairs (aliases).
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
while(!op_v.empty()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) {
tt = op_v.front();
op_v.pop();
if(!op_v.empty()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end()) {
// token after a column that is not itself a column => treat as alias
if(aliases.count(tt) == 0) {
aliases[tt] = op_v.front();
};
}
else {
while(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end() && !op_v.empty()) {
op_v.pop();
};
};
};
};
if(!op_v.empty())
op_v.pop();
};
op_v = op_value;
while(!op_v.empty()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
};
op_v.pop();
};
for (auto it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet *b, *c;
if(a->segCount <= 1)
setSegments(a, op_vx);
allocColumns(a, op_vx);
// One cycle per data segment; filtered sets iterate over their source's segments.
unsigned int cycle_count;
if(a->filtered)
cycle_count = varNames[a->source_name]->segCount;
else
cycle_count = a->segCount;
size_t ol_count = a->mRecCount, cnt;
a->hostRecCount = a->mRecCount;
b = new CudaSet(0, col_count);
b->name = "tmp b in select";
bool c_set = 0;
//size_t tmp_size = a->mRecCount;
//if(a->segCount > 1)
//	tmp_size = a->maxRecs;
vector<thrust::device_vector<int_type> > distinct_val; //keeps array of DISTINCT values for every key
vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key
vector<thrust::device_vector<int_type> > distinct_tmp;
/*	for(unsigned int i = 0; i < distinct_cnt; i++) {
distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size));
distinct_val.push_back(thrust::device_vector<int_type>());
distinct_hash.push_back(thrust::device_vector<int_type>());
};
*/
bool one_liner;
if (grp_cnt != 0)
phase_copy = 1;
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
if(verbose)
cout << "segment " << i << " select mem " << getFreeMem() << endl;
std::clock_t start3 = std::clock();
cnt = 0;
copyColumns(a, op_vx, i, cnt);
if(a->mRecCount) {
if (grp_cnt != 0) {
// Sort on the group keys unless every key is already single-valued
// (min == max), in which case the whole segment is one group.
bool srt = 0;
stack<string> op_vv(op_v2);
while(!op_vv.empty()) {
if(!min_max_eq[op_vv.top()])
srt = 1;
op_vv.pop();
};
if(srt) {
order_inplace(a, op_v2, field_names, 1);
a->GroupBy(op_v2);
}
else {
if(a->grp.size() < a->mRecCount)
a->grp.resize(a->mRecCount);
thrust::fill(a->grp.begin(),a->grp.begin()+a->mRecCount,0);
a->grp[a->mRecCount-1] = 1;
a->grp_count = 1;
};
}
else
a->grp_count = 0;
copyFinalize(a, op_vx,0);
// Evaluate the select expressions for this segment into b.
one_liner = select(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a,b, distinct_tmp);
if(i == 0)
std::reverse(b->columnNames.begin(), b->columnNames.end());
// Lazily create the accumulator set c on the first non-empty segment.
// NOTE(review): if every segment yields b->mRecCount == 0 while the
// grp_cnt==0 copy branch below runs, c would be used uninitialized —
// verify this cannot happen for grp_cnt == 0 inputs.
if (!c_set && b->mRecCount > 0) {
c = new CudaSet(0, col_count);
create_c(c,b);
c_set = 1;
c->name = s;
};
if (grp_cnt && cycle_count > 1 && b->mRecCount > 0) {
// Multi-segment grouped query: merge partial aggregates into c.
add(c,b,op_v3, aliases, distinct_tmp, distinct_val, distinct_hash, a);
}
else {
//copy b to c
unsigned int c_offset = c->mRecCount;
c->resize(b->mRecCount);
for(unsigned int j=0; j < b->columnNames.size(); j++) {
if (b->type[b->columnNames[j]] == 0) {
thrust::copy(b->d_columns_int[b->columnNames[j]].begin(), b->d_columns_int[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_int[b->columnNames[j]].begin() + c_offset);
}
else if (b->type[b->columnNames[j]] == 1) {
thrust::copy(b->d_columns_float[b->columnNames[j]].begin(), b->d_columns_float[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_float[b->columnNames[j]].begin() + c_offset);
};
};
};
//std::cout<< "add time " <<  ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << '\n';
};
std::cout<< "cycle sel time " <<  ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
};
phase_copy = 0;
// Restore the source's record count and release GPU resources.
a->mRecCount = ol_count;
a->mRecCount = a->hostRecCount;
a->deAllocOnDevice();
b->deAllocOnDevice();
a->grp.resize(0);
a->grp.shrink_to_fit();
for(auto i = 0; i < alloced_mem.size(); i++) {
cudaFree(alloced_mem[i]);
alloced_mem.pop_back();
};
// No segment produced rows: publish an empty result set.
if(!c_set) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
c->name = s;
clean_queues();
return;
};
// Finalize aggregates (AVG from SUM/COUNT, COUNT DISTINCT from hashes).
if (grp_cnt) {
count_avg(c, distinct_hash);
}
else {
if(one_liner) {
count_simple(c);
};
};
c->maxRecs = c->mRecCount;
c->hostRecCount = c->mRecCount;
c->string_map = b->string_map;
c->name = s;
c->keep = 1;
if(verbose)
cout << "select res " << c->mRecCount << endl;
// Re-segment the result if it would not comfortably fit into free GPU memory.
size_t tot_size = c->maxRecs*8*c->columnNames.size();
if (getFreeMem() < tot_size*3) {
c->segCount = ((tot_size*3)/getFreeMem() + 1);
c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1);
};
clean_queues();
varNames[s] = c;
b->free();
varNames[s]->keep = 1;
// Standard end-of-statement lifetime bookkeeping.
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
if(verbose)
std::cout<< "select time " <<  ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
// INSERT: appends the rows of dataset s into dataset f.
// During the scan pass (scan_state == 0) this only validates both names and
// records their last-use statement; the actual insertion happens on the
// execution pass via insert_records().
void emit_insert(const char *f, const char* s) {
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: both names must be a known variable or a dictionary table.
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0)
            process_error(2, "Insert : couldn't find variable " + string(f));
        if (stat.find(s) == stat.end() && data_dict.count(s) == 0)
            process_error(2, "Insert : couldn't find variable " + string(s) );
        check_used_vars();
        stat[f] = statement_count;
        stat[s] = statement_count;
        clean_queues();
        return;
    }
    // Execution pass: proceed only when both datasets are materialized.
    bool have_target = varNames.find(f) != varNames.end();
    bool have_source = varNames.find(s) != varNames.end();
    if (have_target && have_source) {
        if (verbose)
            cout << "INSERT " << f << " " << s << endl;
        insert_records(f, s);
    }
    clean_queues();
}
// DELETE: removes rows from dataset f according to the current filter queues.
// Scan pass only records usage; execution pass delegates to delete_records().
void emit_delete(const char *f)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate the name and record its last use.
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0)
            process_error(2, "Delete : couldn't find variable " + string(f));
        stat[f] = statement_count;
        check_used_vars();
        clean_queues();
        return;
    }
    // Execution pass: silently skip datasets that were never materialized.
    if (varNames.find(f) != varNames.end()) {
        delete_records(f);
        cout << "DELETE " << f << endl;
    }
    clean_queues();
}
// CASE expression marker: flips the global op_case flag so downstream filter
// evaluation knows a CASE construct is in play. The relevant values stay on
// the op_type/op_value queues for the modified filter to consume; it yields a
// boolean vector back.
void emit_case()
{
    op_case = 1;
    if (scan_state == 1)
        cout << "emit case " << endl;
}
// CREATE INDEX: persists the indexed column name into "<table>.key" so later
// loads know the table's key column. Runs only on the execution pass.
//   index_name - currently unused; the file is keyed by table name
//   table      - table the index belongs to
//   column     - column being indexed
void emit_create_index(const char *index_name, const char *table, const char *column)
{
    if (scan_state != 0) {
        string s1(table);
        string s3 = s1 + ".key";
        FILE *f = fopen(s3.c_str(), "w");
        // Fix: the original passed an unchecked fopen() result straight to
        // fputs(), crashing when the file could not be created.
        if (f == NULL) {
            fprintf(stderr, "Could not create index file %s\n", s3.c_str());
            return;
        };
        fputs(column,f);
        fclose(f);
    };
}
// CREATE INTERVAL: persists the interval's boundary column names into
// "<table>.interval" as "lcolumn|rcolumn". Runs only on the execution pass.
//   interval_name - currently unused; the file is keyed by table name
//   table         - table the interval belongs to
//   lcolumn       - lower-bound column name
//   rcolumn       - upper-bound column name
void emit_create_interval(const char *interval_name, const char *table, const char *lcolumn, const char *rcolumn)
{
    if (scan_state != 0) {
        string s1(table);
        string s3 = s1 + ".interval";
        FILE *f = fopen(s3.c_str(), "w");
        // Fix: the original passed an unchecked fopen() result straight to
        // fputs(), crashing when the file could not be created.
        if (f == NULL) {
            fprintf(stderr, "Could not create interval file %s\n", s3.c_str());
            return;
        };
        fputs(lcolumn,f);
        fputc('|',f);
        fputs(rcolumn,f);
        fclose(f);
    };
}
// CREATE BITMAP INDEX: for every segment of the (fact) ltable, maps each row's
// lid join key to the matching row of the (dimension) rtable via binary search,
// gathers rtable.rcolumn through that mapping, and stores the compressed result
// under "<ltable>.<rtable>.<rcolumn>.<segment>".
// Scan pass only registers the columns and pins both tables for the whole run.
void emit_create_bitmap_index(const char *index_name, const char *ltable, const char *rtable, const char *rcolumn, const char *lid, const char *rid)
{
    statement_count++;
    if (scan_state == 0) {
        emit_name(rcolumn);
        emit_sel_name(rcolumn);
        emit_name(lid);
        emit_name(rid);
        check_used_vars();
        // Pin both tables: max() means "never the last use", so they are kept.
        stat[rtable] = std::numeric_limits<unsigned int>::max();
        stat[ltable] = std::numeric_limits<unsigned int>::max();
    }
    else {
        cout << ltable << " " << rtable << " " << rid << " " << lid << endl;
        emit_name(rcolumn);
        emit_sel_name(rcolumn);
        emit_name(lid);
        emit_name(rid);
        check_used_vars();
        if(varNames.find(ltable) == varNames.end())
            cout << "Couldn't find " << ltable << endl;
        if(varNames.find(rtable) == varNames.end())
            cout << "Couldn't find " << rtable << endl;
        CudaSet* left = varNames.find(ltable)->second;
        CudaSet* right = varNames.find(rtable)->second;
        // The dimension table is assumed to fit in one segment.
        queue<string> op_vx;
        op_vx.push(rcolumn);op_vx.push(rid);
        allocColumns(right, op_vx);
        right->CopyColumnToGpu(rid, 0, 0);
        right->CopyColumnToGpu(rcolumn, 0, 0);
        op_vx.pop();op_vx.pop();
        op_vx.push(lid);
        allocColumns(left, op_vx);
        for(int i = 0; i < left->segCount; i++) {
            left->CopyColumnToGpu(lid, i, 0);
            thrust::device_vector<unsigned int> output(left->mRecCount);
            // For each left id, find its position in the sorted right id column.
            thrust::lower_bound(right->d_columns_int[rid].begin(), right->d_columns_int[rid].begin() + right->mRecCount,
                                left->d_columns_int[lid].begin(), left->d_columns_int[lid].begin() + left->mRecCount, output.begin());
            string str = std::string(ltable) + std::string(".") + std::string(rtable) + std::string(".") + std::string(rcolumn) + std::string(".") + to_string(i);
            thrust::device_vector<int_type> res(left->mRecCount);
            thrust::host_vector<int_type> res_h(left->mRecCount);
            if(right->type[rcolumn] == 0) {
                // Integer dimension column: gather and compress directly.
                thrust::gather(output.begin(), output.begin() + left->mRecCount, right->d_columns_int[rcolumn].begin() , res.begin());
                thrust::copy(res.begin(), res.begin() + left->mRecCount, res_h.begin());
                compress_int(str, res_h);
            }
            else if(right->type[rcolumn] == 1) {
                // Float dimension columns are not indexed (intentional no-op upstream).
            }
            else { //strings
                // String columns are indexed via their precomputed hash file.
                string f1 = right->load_file_name + "." + rcolumn + ".0.hash"; //need to change it in case if there are dimensions tables larger than 1 segment ?
                FILE* f = fopen(f1.c_str(), "rb" );
                // Fix: the original never checked fopen(), crashing in fread
                // when the hash file was missing.
                if (f == NULL) {
                    fprintf(stderr, "Could not open hash file %s\n", f1.c_str());
                    continue;
                };
                unsigned int cnt;
                // Fix: the original ignored both fread() return values,
                // silently indexing garbage on a short/corrupt file.
                if (fread(&cnt, 4, 1, f) != 1) {
                    fprintf(stderr, "Could not read hash file header %s\n", f1.c_str());
                    fclose(f);
                    continue;
                };
                if(res_h.size() < cnt)
                    res_h.resize(cnt);
                if(res.size() < cnt)
                    res.resize(cnt);
                // cnt*8 assumes 8-byte hash entries (matches int_type usage here).
                if (fread(res_h.data(), cnt*8, 1, f) != 1) {
                    fprintf(stderr, "Could not read hash file payload %s\n", f1.c_str());
                    fclose(f);
                    continue;
                };
                res = res_h;
                fclose(f);
                thrust::device_vector<int_type> output1(left->mRecCount);
                thrust::gather(output.begin(), output.begin() + left->mRecCount ,
                               res.begin(), output1.begin());
                thrust::copy(output1.begin(), output1.begin() + left->mRecCount, res_h.begin());
                compress_int(str, res_h);
            };
        };
    };
}
// DISPLAY: prints up to a limit of rows of dataset f to stdout.
// Scan pass records usage (including any backing filter variable); execution
// pass calls CudaSet::Display and frees the set if this was its last use.
// The 'sep' parameter is accepted for interface parity but unused here.
void emit_display(const char *f, const char* sep)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass. (The error text says "Filter" upstream; kept verbatim.)
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0)
            process_error(2, "Filter : couldn't find variable " + string(f) );
        stat[f] = statement_count;
        if (filter_var.find(f) != filter_var.end())
            stat[filter_var[f]] = statement_count;
        clean_queues();
        return;
    }
    auto it = varNames.find(f);
    if (it == varNames.end()) {
        clean_queues();
        return;
    }
    CudaSet* dataset = it->second;
    // An optional leading numeric operand is the row limit (0 = unlimited).
    int row_limit = 0;
    if (!op_nums.empty()) {
        row_limit = op_nums.front();
        op_nums.pop();
    }
    dataset->Display(row_limit, 0, 1);
    clean_queues();
    // Free the set if this statement was its last recorded use and it isn't pinned.
    if (stat[f] == statement_count && dataset->keep == 0) {
        dataset->free();
        varNames.erase(f);
    }
}
// FILTER: creates dataset s as a lazily-filtered view of dataset f. The filter
// predicate queues (op_type/op_value/op_nums...) are saved on the new set and
// evaluated later when its columns are actually copied. Filtering an already
// filtered set merges both predicate stacks with an AND.
void emit_filter(char *s, char *f)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: record lifetimes and remember s -> f for filter tracking.
        if (stat.find(f) == stat.end() && data_dict.count(f) == 0) {
            process_error(1, "Filter : couldn't find variable " + string(f));
        };
        stat[s] = statement_count;
        stat[f] = statement_count;
        filter_var[s] = f;
        // check possible use of other variables in filters
        queue<string> op(op_value);
        while(!op.empty()) {
            size_t pos1 = op.front().find_first_of(".", 0);
            if(pos1 != string::npos) {
                stat[op.front().substr(0,pos1)] = statement_count;
            };
            op.pop();
        };
        check_used_vars();
        clean_queues();
        return;
    };
    // Fix: every sibling (emit_display, emit_delete, emit_order) guards the
    // lookup before dereferencing; the original dereferenced end() when f was
    // never materialized, which is undefined behavior.
    if(varNames.find(f) == varNames.end()) {
        clean_queues();
        return;
    };
    CudaSet *a, *b;
    a = varNames.find(f)->second;
    a->name = f;
    if(a->mRecCount == 0 && !a->filtered) {
        // Empty unfiltered source: the result is an empty set.
        b = new CudaSet(0,1);
    }
    else {
        if(verbose)
            cout << "INLINE FILTER " << f << endl;
        b = a->copyDeviceStruct();
        b->name = s;
        b->sorted_fields = a->sorted_fields;
        b->presorted_fields = a->presorted_fields;
        //save the stack
        b->fil_s = s;
        b->fil_f = f;
        b->fil_type = op_type;
        b->fil_value = op_value;
        b->fil_nums = op_nums;
        b->fil_nums_f = op_nums_f;
        b->fil_nums_precision = op_nums_precision;
        b->filtered = 1;
        b->tmp_table = a->tmp_table;
        b->string_map = a->string_map;
        if(a->filtered) {
            // Filtering a filtered set: chain to the original source and AND
            // the saved predicate onto the new one, draining a's queues.
            b->source_name = a->source_name;
            b->fil_f = a->fil_f;
            while(!a->fil_value.empty()) {
                b->fil_value.push(a->fil_value.front());
                a->fil_value.pop();
            };
            while(!a->fil_type.empty()) {
                b->fil_type.push(a->fil_type.front());
                a->fil_type.pop();
            };
            b->fil_type.push("AND");
            while(!a->fil_nums.empty()) {
                b->fil_nums.push(a->fil_nums.front());
                a->fil_nums.pop();
            };
            while(!a->fil_nums_precision.empty()) {
                b->fil_nums_precision.push(a->fil_nums_precision.front());
                a->fil_nums_precision.pop();
            };
            while(!a->fil_nums_f.empty()) {
                b->fil_nums_f.push(a->fil_nums_f.front());
                a->fil_nums_f.pop();
            };
            a->filtered = 0;
            varNames.erase(f);
        }
        else
            b->source_name = f;
        b->maxRecs = a->maxRecs;
        b->prm_d.resize(a->maxRecs);
    };
    b->hostRecCount = a->hostRecCount;
    clean_queues();
    // Replace any previous binding of s.
    if (varNames.count(s) > 0)
        varNames[s]->free();
    varNames[s] = b;
    if(stat[s] == statement_count) {
        b->free();
        varNames.erase(s);
    };
}
// STORE: writes dataset s to a delimited text file f using separator sep.
// Scan pass records usage; execution pass delegates to CudaSet::Store and
// frees the set afterwards if this was its last use.
void emit_store(const char *s, const char *f, const char* sep)
{
    statement_count++;
    if (scan_state == 0) {
        // Scan pass: validate the dataset name and record lifetimes.
        if (stat.find(s) == stat.end() && data_dict.count(s) == 0)
            process_error(2, "Store : couldn't find variable " + string(s) );
        stat[s] = statement_count;
        if (filter_var.find(f) != filter_var.end())
            stat[filter_var[f]] = statement_count;
        clean_queues();
        return;
    }
    auto it = varNames.find(s);
    if (it == varNames.end())
        return;
    CudaSet* dataset = it->second;
    if (verbose)
        cout << "STORE: " << s << " " << f << " " << sep << endl;
    // An optional leading numeric operand is the row limit (0 = unlimited).
    int row_limit = 0;
    if (!op_nums.empty()) {
        row_limit = op_nums.front();
        op_nums.pop();
    }
    dataset->Store(f, sep, row_limit, 0, 0);
    // Free the set if this statement was its last recorded use and it isn't pinned.
    if (stat[s] == statement_count && dataset->keep == 0) {
        dataset->free();
        varNames.erase(s);
    }
};
// Stores variable `s` into the binary (segmented column) format under name
// `f`; when `append` is true, new segments are appended to an existing
// table.  On the first pass (scan_state == 0) only usage statistics are
// recorded.  If the fact table has not been fully loaded yet, the source
// text file is streamed chunk-by-chunk and each chunk is stored as loaded.
void emit_store_binary(const char *s, const char *f, const bool append)
{
    statement_count++;
    if (scan_state == 0) {
        if (stat.find(s) == stat.end() && data_dict.count(s) == 0) {
            process_error(2, "Store : couldn't find variable " + string(s));
        };
        stat[s] = statement_count;
        if(filter_var.find(f) != filter_var.end())
            stat[filter_var[f]] = statement_count;
        clean_queues();
        return;
    };
    cout << "Append " << append << endl;
    if(varNames.find(s) == varNames.end())
        return;
    CudaSet* a = varNames.find(s)->second;
    if(stat[f] == statement_count)
        a->deAllocOnDevice();
    printf("STORE: %s %s \n", s, f);
    // Optional LIMIT pushed by the parser.
    int limit = 0;
    if(!op_nums.empty()) {
        limit = op_nums.front();
        op_nums.pop();
    };
    total_count = 0;
    total_segments = 0;
    a->maxRecs = 0;
    if(fact_file_loaded) {
        a->Store(f,"", limit, 1, append);
    }
    else {
        // Stream the source file one chunk at a time, storing each chunk.
        FILE* file_p = nullptr;  // BUGFIX: was uninitialized and never closed
        if(a->text_source) {
            file_p = fopen(a->load_file_name.c_str(), "rb");
            if (!file_p) {
                process_error(2, "Could not open file " + a->load_file_name );
            };
        };
        thrust::device_vector<char> d_readbuff;
        thrust::device_vector<char*> dest(a->mColumnCount);
        thrust::device_vector<unsigned int> ind(a->mColumnCount);
        thrust::device_vector<unsigned int> dest_len(a->mColumnCount);
        while(!fact_file_loaded) {
            if(verbose)
                cout << "LOADING " << a->load_file_name << " mem: " << getFreeMem() << endl;
            if(a->text_source)
                fact_file_loaded = a->LoadBigFile(file_p, d_readbuff, dest, ind, dest_len);
            // Track the largest chunk seen so segment buffers are sized right.
            if(a->maxRecs < a->mRecCount)
                a->maxRecs = a->mRecCount;
            a->Store(f,"", limit, 1, append);
        };
        if (file_p)
            fclose(file_p);  // BUGFIX: release the input file handle
    };
    a->writeSortHeader(f);
    if(stat[f] == statement_count && !a->keep) {
        a->free();
        varNames.erase(s);
    };
};
// Loads the header of binary table `f` and registers a CudaSet for it under
// variable name `s`.  Only the header is read here; column data is loaded
// lazily, segment by segment.  `d` is unused (kept for parser compatibility).
void emit_load_binary(const char *s, const char *f, const int d)
{
    statement_count++;
    if (scan_state == 0) {
        stat[s] = statement_count;
        return;
    };
    if(verbose)
        printf("BINARY LOAD: %s %s \n", s, f);
    std::clock_t start1 = std::clock();
    CudaSet *a;
    unsigned int segCount, maxRecs;
    string f1(f);
    f1 += "." + namevars.front() + ".header";
    FILE* ff = fopen(f1.c_str(), "rb");
    if(!ff) {
        process_error(2, "Couldn't open file " + f1);
    };
    size_t totRecs;
    // Header layout: 8-byte record count, 4-byte segment count, 4-byte
    // max-records-per-segment.  NOTE(review): sizes are hard-coded, so this
    // assumes size_t is 8 bytes and unsigned int is 4 bytes on this platform.
    // BUGFIX: fread return values were previously ignored, so a truncated
    // header silently produced garbage counts.
    if (fread((char *)&totRecs, 8, 1, ff) != 1 ||
        fread((char *)&segCount, 4, 1, ff) != 1 ||
        fread((char *)&maxRecs, 4, 1, ff) != 1) {
        fclose(ff);
        process_error(2, "Couldn't read header from file " + f1);
        return;
    };
    fclose(ff);
    if(verbose)
        cout << "Reading " << totRecs << " records" << endl;
    a = new CudaSet(namevars, typevars, sizevars, cols, totRecs, f, maxRecs);
    a->segCount = segCount;
    a->keep = true;
    a->name = s;
    varNames[s] = a;
    // If the variable is never referenced again, release it immediately.
    if(stat[s] == statement_count ) {
        a->free();
        varNames.erase(s);
    };
    std::cout<< "load time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
// Registers a text-file load: creates an empty CudaSet for variable `s`
// backed by text file `f` with field separator `sep`.  `d` (column count
// from the parser) is unused here; actual reading happens later, when the
// data is first stored or scanned.
void emit_load(const char *s, const char *f, const int d, const char* sep)
{
    statement_count++;

    // First (scanning) pass: record the statement number and bail.
    if (scan_state == 0) {
        stat[s] = statement_count;
        return;
    }

    printf("LOAD: %s %s %d %s \n", s, f, d, sep);

    CudaSet *a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
    a->keep = true;
    a->not_compressed = 1;
    a->load_file_name = f;
    a->separator = sep;
    varNames[s] = a;
    fact_file_loaded = 0;

    // Variable never used afterwards: free it right away.
    if (stat[s] == statement_count) {
        a->free();
        varNames.erase(s);
    }
}
// Prints the name of every table registered in the data dictionary.
// Only runs on the first (scanning) pass.
void emit_show_tables()
{
    if (scan_state != 1)
        return;
    for (const auto &entry : data_dict)
        cout << entry.first << endl;
}
// Drops table `table_name`: deletes all of its segment files, per-column
// header files, and sort/presort files, then removes it from the data
// dictionary.  Only runs on the first (scanning) pass.
void emit_drop_table(const char* table_name)
{
    if (scan_state == 1) {
        map<string, map<string, col_data> >::iterator iter;
        if((iter = data_dict.find(table_name)) != data_dict.end()) {
            auto s = (*iter).second;
            for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
                // Delete numbered segment files until remove() fails
                // (remove returns 0 on success).
                int seg = 0;
                string f_name = (*iter).first + "." + (*it).first + "." + to_string(seg);
                while(!remove(f_name.c_str())) {
                    seg++;
                    f_name = (*iter).first + "." + (*it).first + "." + to_string(seg);
                };
                f_name = (*iter).first + "." + (*it).first + ".header";
                remove(f_name.c_str());
            };
            // BUGFIX: the sort/presort removals and the dictionary erase used
            // to run OUTSIDE this if-block, dereferencing the end() iterator
            // whenever the table did not exist (undefined behavior).
            string s_name = (*iter).first + ".presort";
            remove(s_name.c_str());
            s_name = (*iter).first + ".sort";
            remove(s_name.c_str());
            data_dict.erase(iter);
        };
        save_dict = 1;
    };
    return;
}
void emit_describe_table(const char* table_name)
{
if (scan_state == 1) {
map<string, map<string, col_data> >::iterator iter;
if((iter = data_dict.find(table_name)) != data_dict.end()) {
auto s = (*iter).second;
for (auto it=s.begin() ; it != s.end(); ++it ) {
if ((*it).second.col_type == 0) {
if((*it).second.col_length) {
if((*it).second.col_length != UINT_MAX)
cout << (*it).first << " decimal with precision of " << (*it).second.col_length << endl;
else
cout << (*it).first << " timestamp" << endl;
}
else
cout << (*it).first << " integer" << endl;
}
else if ((*it).second.col_type == 1) {
cout << (*it).first << " float" << endl;
}
else if ((*it).second.col_type == 3) {
cout << (*it).first << " decimal" << endl;
}
else {
cout << (*it).first << " char(" << (*it).second.col_length << ")" << endl;
};
};
};
};
return;
}
// Bison error callback.  Reports the current line number and the offending
// token on stderr/stdout, then forwards the raw message to the host
// application's error callback.
// NOTE(review): the function is declared variadic (printf-style), but the
// extra arguments are ignored -- `s` is passed to error_cb unformatted.
void yyerror(char *s, ...)
{
    extern int yylineno;   // maintained by the lexer
    extern char *yytext;   // text of the token where parsing failed
    fprintf(stderr, "%d: error: ", yylineno);
    cout << yytext << endl;
    error_cb(1, s);
}
// Drains every parser-to-engine communication queue and resets the
// per-statement counters.  Called between statements so state produced by
// one SQL statement never leaks into the next.
void clean_queues()
{
    // Operator / operand queues filled by the parser.
    while(!op_type.empty()) op_type.pop();
    while(!op_value.empty()) op_value.pop();
    while(!op_join.empty()) op_join.pop();
    while(!op_nums.empty()) op_nums.pop();
    while(!op_nums_f.empty()) op_nums_f.pop();
    while(!op_nums_precision.empty()) op_nums_precision.pop();
    while(!j_col_count.empty()) j_col_count.pop();
    // Column-description queues used when constructing CudaSets.
    while(!namevars.empty()) namevars.pop();
    while(!typevars.empty()) typevars.pop();
    while(!sizevars.empty()) sizevars.pop();
    while(!cols.empty()) cols.pop();
    // Sort / join bookkeeping.
    while(!op_sort.empty()) op_sort.pop();
    while(!op_presort.empty()) op_presort.pop();
    while(!join_type.empty()) join_type.pop();
    while(!join_eq_type.empty()) join_eq_type.pop();
    op_case = 0;
    sel_count = 0;
    join_cnt = 0;
    join_col_cnt = 0;
    distinct_cnt = 0;
    join_tab_cnt = 0;
    tab_cnt = 0;
    join_and_cnt.clear();
}
// For every table referenced by the parsed statement(s), rebuilds the
// column-description queues (namevars/typevars/sizevars/cols) from the data
// dictionary and triggers a binary load of that table's headers.
// Type mapping: col_type 0 with length 0 -> "int", length UINT_MAX ->
// "timestamp", other lengths -> "decimal"; col_type 1 -> "float";
// anything else -> "char".
void load_vars()
{
    if(used_vars.size() == 0) {
        //cout << "Error, no valid column names have been found " << endl;
        //exit(0);
    }
    else {
        for (auto it=used_vars.begin(); it != used_vars.end(); ++it ) {
            // Start from clean queues for each table.
            while(!namevars.empty()) namevars.pop();
            while(!typevars.empty()) typevars.pop();
            while(!sizevars.empty()) sizevars.pop();
            while(!cols.empty()) cols.pop();
            if(stat.count((*it).first) != 0) {
                auto &c = (*it).second;
                auto &tbl = data_dict[(*it).first];
                for (auto sit=c.begin() ; sit != c.end(); ++sit ) {
                    // Hoisted: one dictionary lookup per column instead of
                    // four separate data_dict[table][column] lookups.
                    auto &cd = tbl[(*sit).first];
                    namevars.push((*sit).first);
                    if(cd.col_type == 0) {
                        if(cd.col_length == 0) {
                            typevars.push("int");
                        }
                        else {
                            if(cd.col_length == UINT_MAX)
                                typevars.push("timestamp");
                            else
                                typevars.push("decimal");
                        }
                    }
                    else if(cd.col_type == 1)
                        typevars.push("float");
                    else typevars.push("char");
                    sizevars.push(cd.col_length);
                    cols.push(0);
                };
                emit_load_binary((*it).first.c_str(), (*it).first.c_str(), 0);
            };
        };
    };
}
// Prefixes `err` with a human-readable severity tag and forwards it to the
// registered C error callback.  Severity 1 = warning, 2 = fatal, anything
// else = aborting.
void process_error(int severity, string err) {
    const char *tag;
    switch (severity) {
    case 1:
        tag = "(Warning) ";
        break;
    case 2:
        tag = "(Fatal) ";
        break;
    default:
        tag = "(Aborting) ";
        break;
    }
    err = tag + err;
    error_cb(severity, err.c_str()); // send the error to the c based callback
}
// One-time engine initialization: resets counters, sets default limits and
// flags, and clears all parser queues.  `av` is currently unused (kept for
// API compatibility).
void alenkaInit(char ** av)
{
    statement_count = 0;
    scan_state      = 1;
    verbose         = 0;
    process_count   = 1000000000;  // max records processed per chunk
    clean_queues();
    //context = CreateCudaDevice(0, nullptr, true);
}
// Engine shutdown: resets the statement counter and releases the shared
// scratch buffer, if one was ever allocated.
// NOTE(review): this hipified file still calls cudaFree here -- if
// alloced_tmp was allocated through the HIP runtime this should likely be
// hipFree; confirm against the allocation site (not visible in this file).
void alenkaClose()
{
    statement_count = 0;
    if(alloced_sz) {
        cudaFree(alloced_tmp);
        alloced_sz = 0;
    };
}
|
98ef1979985f22b70829fb602c986210b3584461.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hipcub/hipcub.hpp>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "range_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace cuda {
// One thread per output element: element i of the range is start + delta*i.
// Launched by RangeImpl with a 1-D grid; threads past `count` do nothing.
template <typename T>
__global__ void RangeKernel(const T start, const T delta, const int count, T* output) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < count) {  // tail guard: grid rarely divides count evenly
        output[index] = start + delta * index;
    }
}
// Fills `output` (device memory, length `count`) with the arithmetic
// sequence start, start+delta, start+2*delta, ...  The launch is
// asynchronous on `stream`; returns the result of checking
// hipPeekAtLastError for launch-configuration errors.
template <typename T>
bool RangeImpl(hipStream_t stream, const T start, const T delta, const int count, T* output) {
    constexpr int block_size = 256;
    // Ceil-division so every element gets a thread.
    int grid_size = (count + block_size - 1) / block_size;
    hipLaunchKernelGGL(( RangeKernel<T>), dim3(grid_size), dim3(block_size), 0, stream, start, delta, count, output);
    return CUDA_CALL(hipPeekAtLastError());
}
#define SPECIALIZED_IMPL(T) \
template bool RangeImpl<T>(hipStream_t stream, const T start, const T delta, const int count, T* output);
SPECIALIZED_IMPL(int16_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
| 98ef1979985f22b70829fb602c986210b3584461.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "range_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace cuda {
// One thread per output element: element i of the range is start + delta*i.
// Launched by RangeImpl with a 1-D grid; threads past `count` do nothing.
template <typename T>
__global__ void RangeKernel(const T start, const T delta, const int count, T* output) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < count) {  // tail guard: grid rarely divides count evenly
        output[index] = start + delta * index;
    }
}
// Fills `output` (device memory, length `count`) with the arithmetic
// sequence start, start+delta, start+2*delta, ...  The launch is
// asynchronous on `stream`; returns the result of checking
// cudaPeekAtLastError for launch-configuration errors.
template <typename T>
bool RangeImpl(cudaStream_t stream, const T start, const T delta, const int count, T* output) {
    constexpr int block_size = 256;
    // Ceil-division so every element gets a thread.
    int grid_size = (count + block_size - 1) / block_size;
    RangeKernel<T><<<grid_size, block_size, 0, stream>>>(start, delta, count, output);
    return CUDA_CALL(cudaPeekAtLastError());
}
#define SPECIALIZED_IMPL(T) \
template bool RangeImpl<T>(cudaStream_t stream, const T start, const T delta, const int count, T* output);
SPECIALIZED_IMPL(int16_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
|
efee719ffcea7c6f687756785f641345134a79e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef BCNN_USE_CUDA
#include "bcnn_dropout_layer.h"
#include "bcnn_tensor.h"
#include "bcnn_utils.h"
// Inverted-dropout kernel: zeroes element `id` when its pre-generated
// uniform random number falls below `prob`, otherwise rescales it by
// `scale`.  Operates in place on `input`; supports a 2-D grid of 1-D
// blocks (gridDim.x * gridDim.y blocks).
__global__ void _bcnn_dropout_layer_kernel(float *input, int size, float *rand,
                                           float prob, float scale) {
    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (id < size) {  // tail guard
        input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
    }
}
// Forward pass of the dropout layer (GPU).  No-op outside training mode
// (dropout is identity at inference).  Fills the layer's random buffer with
// fresh uniforms, then masks/rescales the source tensor in place.
void bcnn_forward_dropout_layer_gpu(bcnn_net *net, bcnn_node *node) {
    bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
    bcnn_dropout_param *param = (bcnn_dropout_param *)node->param;
    int size = bcnn_tensor_size(src_tensor);
    if (net->mode != BCNN_MODE_TRAIN) {
        return;
    }
    // One uniform random per element, consumed by the kernel as the mask.
    bcnn_cuda_fill_with_random(param->rand_gpu, size);
    hipLaunchKernelGGL(( _bcnn_dropout_layer_kernel), dim3(bcnn_cuda_blocks(size)), dim3(BCNN_CUDA_THREADS), 0, 0,
        src_tensor->data_gpu, size, param->rand_gpu, param->dropout_rate,
        param->scale);
    bcnn_cuda_check(hipPeekAtLastError());  // surface launch errors
    return;
}
// Backward pass of the dropout layer (GPU): applies the same mask/scale to
// the incoming gradient, reusing the random buffer generated during the
// forward pass.  No-op when the source tensor has no gradient buffer.
void bcnn_backward_dropout_layer_gpu(bcnn_net *net, bcnn_node *node) {
    bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
    bcnn_dropout_param *param = (bcnn_dropout_param *)node->param;
    int size = bcnn_tensor_size(src_tensor);
    if (!src_tensor->grad_data_gpu) {
        return;
    }
    hipLaunchKernelGGL(( _bcnn_dropout_layer_kernel), dim3(bcnn_cuda_blocks(size)), dim3(BCNN_CUDA_THREADS), 0, 0,
        src_tensor->grad_data_gpu, size, param->rand_gpu, param->dropout_rate,
        param->scale);
    bcnn_cuda_check(hipPeekAtLastError());  // surface launch errors
    return;
}
#endif | efee719ffcea7c6f687756785f641345134a79e7.cu | /*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef BCNN_USE_CUDA
#include "bcnn_dropout_layer.h"
#include "bcnn_tensor.h"
#include "bcnn_utils.h"
// Inverted-dropout kernel: zeroes element `id` when its pre-generated
// uniform random number falls below `prob`, otherwise rescales it by
// `scale`.  Operates in place on `input`; supports a 2-D grid of 1-D
// blocks (gridDim.x * gridDim.y blocks).
__global__ void _bcnn_dropout_layer_kernel(float *input, int size, float *rand,
                                           float prob, float scale) {
    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (id < size) {  // tail guard
        input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
    }
}
// Forward pass of the dropout layer (GPU).  No-op outside training mode
// (dropout is identity at inference).  Fills the layer's random buffer with
// fresh uniforms, then masks/rescales the source tensor in place.
void bcnn_forward_dropout_layer_gpu(bcnn_net *net, bcnn_node *node) {
    bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
    bcnn_dropout_param *param = (bcnn_dropout_param *)node->param;
    int size = bcnn_tensor_size(src_tensor);
    if (net->mode != BCNN_MODE_TRAIN) {
        return;
    }
    // One uniform random per element, consumed by the kernel as the mask.
    bcnn_cuda_fill_with_random(param->rand_gpu, size);
    _bcnn_dropout_layer_kernel<<<bcnn_cuda_blocks(size), BCNN_CUDA_THREADS>>>(
        src_tensor->data_gpu, size, param->rand_gpu, param->dropout_rate,
        param->scale);
    bcnn_cuda_check(cudaPeekAtLastError());  // surface launch errors
    return;
}
// Backward pass of the dropout layer (GPU): applies the same mask/scale to
// the incoming gradient, reusing the random buffer generated during the
// forward pass.  No-op when the source tensor has no gradient buffer.
void bcnn_backward_dropout_layer_gpu(bcnn_net *net, bcnn_node *node) {
    bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
    bcnn_dropout_param *param = (bcnn_dropout_param *)node->param;
    int size = bcnn_tensor_size(src_tensor);
    if (!src_tensor->grad_data_gpu) {
        return;
    }
    _bcnn_dropout_layer_kernel<<<bcnn_cuda_blocks(size), BCNN_CUDA_THREADS>>>(
        src_tensor->grad_data_gpu, size, param->rand_gpu, param->dropout_rate,
        param->scale);
    bcnn_cuda_check(cudaPeekAtLastError());  // surface launch errors
    return;
}
#endif |
ef392b159345f41769f6c9031f44d7e47c8e8cba.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "csr.h"
#include <gtest/gtest.h>
#include "sparse/csr.h"
#include "random/rng.h"
#include "test_utils.h"
#include <iostream>
namespace MLCommon {
namespace Sparse {
// Parameterized gtest fixture for the sparse CSR primitives.  The fixture
// holds no shared state; each TEST_P below allocates its own buffers.
template <typename T>
class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> {
protected:
    void SetUp() override {}
    void TearDown() override {}
protected:
    CSRInputs<T> params;
};
const std::vector<CSRInputs<float>> inputsf = {
{5, 10, 5, 1234ULL}};
typedef CSRTest<float> CSRToCOO;
// Verifies csr_to_coo: expands CSR row offsets {0,4,8,9} into the
// per-nonzero COO row-index array {0,0,0,0,1,1,1,1,2,3}.
TEST_P(CSRToCOO, Result) {
    hipStream_t stream;
    hipStreamCreate(&stream);
    int *ex_scan;
    int *result, *verify;
    int *ex_scan_h = new int[4]{0, 4, 8, 9 };
    int *verify_h = new int[10]{ 0, 0, 0, 0, 1, 1, 1, 1, 2, 3 };
    allocate(verify, 10);
    allocate(ex_scan, 4);
    allocate(result, 10, true);
    updateDevice(ex_scan, ex_scan_h, 4, stream);
    updateDevice(verify, verify_h, 10, stream);
    csr_to_coo<32>(ex_scan, 4, result, 10, stream);
    std::cout << MLCommon::arr2Str(result, 10, "result", stream) << std::endl;
    ASSERT_TRUE(devArrMatch<int>(verify, result, 10, Compare<float>(), stream));
    std::cout << "Verified!" << std::endl;
    // BUGFIX: arrays allocated with new[] must be released with delete[]
    // (plain delete on them is undefined behavior).
    delete[] ex_scan_h;
    delete[] verify_h;
    CUDA_CHECK(hipFree(ex_scan));
    CUDA_CHECK(hipFree(verify));
    CUDA_CHECK(hipFree(result));
    hipStreamDestroy(stream);
}
typedef CSRTest<float> CSRRowNormalizeMax;
// Verifies csr_row_normalize_max: every stored value is divided by the
// maximum of its CSR row (row offsets {0,4,8,9} over 10 stored values),
// so e.g. row {5,1,0,0} becomes {1,0.2,0,0}.
TEST_P(CSRRowNormalizeMax, Result) {
    hipStream_t stream;
    hipStreamCreate(&stream);
    int *ex_scan;
    float *in_vals, *result, *verify;
    int ex_scan_h[4] = {0, 4, 8, 9 };
    float in_vals_h[10] = { 5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0 };
    float verify_h[10] = { 1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0 };
    allocate(in_vals, 10);
    allocate(verify, 10);
    allocate(ex_scan, 4);
    allocate(result, 10, true);
    updateDevice(ex_scan, *&ex_scan_h, 4, stream);
    updateDevice(in_vals, *&in_vals_h, 10, stream);
    updateDevice(verify, *&verify_h, 10, stream);
    csr_row_normalize_max<32, float>(ex_scan, in_vals, 10, 4, result, stream);
    std::cout << MLCommon::arr2Str(result, 10, "result", stream) << std::endl;
    ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>()));
    hipStreamDestroy(stream);
    CUDA_CHECK(hipFree(ex_scan));
    CUDA_CHECK(hipFree(in_vals));
    CUDA_CHECK(hipFree(verify));
    CUDA_CHECK(hipFree(result));
}
typedef CSRTest<float> CSRRowNormalizeL1;
// Verifies csr_row_normalize_l1: every stored value is divided by the L1
// norm (sum of absolute values) of its CSR row, so e.g. row {1,1,0,0}
// becomes {0.5,0.5,0,0}.  Uses the default stream (0) throughout.
TEST_P(CSRRowNormalizeL1, Result) {
    int *ex_scan;
    float *in_vals, *result, *verify;
    int ex_scan_h[4] = {0, 4, 8, 9 };
    float in_vals_h[10] = { 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0 };
    float verify_h[10] = { 0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0 };
    allocate(in_vals, 10);
    allocate(verify, 10);
    allocate(ex_scan, 4);
    allocate(result, 10, true);
    updateDevice(ex_scan, *&ex_scan_h, 4, 0);
    updateDevice(in_vals, *&in_vals_h, 10, 0);
    updateDevice(verify, *&verify_h, 10, 0);
    csr_row_normalize_l1<32, float>(ex_scan, in_vals, 10, 4, result, 0);
    hipDeviceSynchronize();  // ensure the kernel finished before comparing
    ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>()));
    CUDA_CHECK(hipFree(ex_scan));
    CUDA_CHECK(hipFree(in_vals));
    CUDA_CHECK(hipFree(verify));
    CUDA_CHECK(hipFree(result));
}
typedef CSRTest<float> CSRSum;
// Verifies the two-phase CSR addition: csr_add_calc_inds computes the row
// offsets and nnz of the union matrix, csr_add_finalize fills its column
// indices and summed values.
TEST_P(CSRSum, Result) {
    hipStream_t stream;
    hipStreamCreate(&stream);
    int *ex_scan, *ind_ptr_a, *ind_ptr_b, *verify_indptr;
    float *in_vals_a, *in_vals_b, *verify;
    int ex_scan_h[4] = {0, 4, 8, 9 };
    int indptr_a_h[10] = { 1, 2, 3, 4, 1, 2, 3, 5, 0, 1 };
    int indptr_b_h[10] = { 1, 2, 5, 4, 0, 2, 3, 5, 1, 0 };
    float in_vals_h[10] = { 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0 };
    float verify_h[14] = { 2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
    int verify_indptr_h[14] = { 1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0 };
    allocate(in_vals_a, 10);
    allocate(in_vals_b, 10);
    allocate(verify, 14);
    allocate(ex_scan, 4);
    allocate(verify_indptr, 14);
    allocate(ind_ptr_a, 10);
    allocate(ind_ptr_b, 10);
    updateDevice(ex_scan, *&ex_scan_h, 4, stream);
    updateDevice(in_vals_a, *&in_vals_h, 10, stream);
    updateDevice(in_vals_b, *&in_vals_h, 10, stream);
    updateDevice(verify, *&verify_h, 14, stream);
    updateDevice(verify_indptr, *&verify_indptr_h, 14, stream);
    updateDevice(ind_ptr_a, *&indptr_a_h, 10, stream);
    updateDevice(ind_ptr_b, *&indptr_b_h, 10, stream);
    int *result_ind;
    allocate(result_ind, 4);
    // Phase 1: row offsets of the union and total nnz.
    int nnz = csr_add_calc_inds<float, 32>(
        ex_scan, ind_ptr_a, in_vals_a, 10,
        ex_scan, ind_ptr_b, in_vals_b, 10,
        4, result_ind,
        0
    );
    int *result_indptr;
    float *result_val;
    allocate(result_indptr, nnz);
    allocate(result_val, nnz);
    // Phase 2: column indices and summed values.
    csr_add_finalize<float, 32>(
        ex_scan, ind_ptr_a, in_vals_a, 10,
        ex_scan, ind_ptr_b, in_vals_b, 10,
        4, result_ind, result_indptr, result_val,
        0
    );
    ASSERT_TRUE(nnz==14);
    ASSERT_TRUE(devArrMatch<float>(verify, result_val, nnz, Compare<float>()));
    ASSERT_TRUE(devArrMatch<int>(verify_indptr, result_indptr, nnz, Compare<int>()));
    hipStreamDestroy(stream);
    CUDA_CHECK(hipFree(ex_scan));
    CUDA_CHECK(hipFree(in_vals_a));
    CUDA_CHECK(hipFree(in_vals_b));
    CUDA_CHECK(hipFree(ind_ptr_a));
    CUDA_CHECK(hipFree(ind_ptr_b));
    CUDA_CHECK(hipFree(verify));
    CUDA_CHECK(hipFree(verify_indptr));  // BUGFIX: was leaked
    CUDA_CHECK(hipFree(result_ind));     // BUGFIX: was leaked
    CUDA_CHECK(hipFree(result_indptr));
    CUDA_CHECK(hipFree(result_val));
}
INSTANTIATE_TEST_CASE_P(CSRTests, CSRToCOO,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeMax,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeL1,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRSum,
::testing::ValuesIn(inputsf));
}}
| ef392b159345f41769f6c9031f44d7e47c8e8cba.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "csr.h"
#include <gtest/gtest.h>
#include "sparse/csr.h"
#include "random/rng.h"
#include "test_utils.h"
#include <iostream>
namespace MLCommon {
namespace Sparse {
// Parameterized gtest fixture for the sparse CSR primitives.  The fixture
// holds no shared state; each TEST_P below allocates its own buffers.
template <typename T>
class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> {
protected:
    void SetUp() override {}
    void TearDown() override {}
protected:
    CSRInputs<T> params;
};
const std::vector<CSRInputs<float>> inputsf = {
{5, 10, 5, 1234ULL}};
typedef CSRTest<float> CSRToCOO;
// Verifies csr_to_coo: expands CSR row offsets {0,4,8,9} into the
// per-nonzero COO row-index array {0,0,0,0,1,1,1,1,2,3}.
TEST_P(CSRToCOO, Result) {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    int *ex_scan;
    int *result, *verify;
    int *ex_scan_h = new int[4]{0, 4, 8, 9 };
    int *verify_h = new int[10]{ 0, 0, 0, 0, 1, 1, 1, 1, 2, 3 };
    allocate(verify, 10);
    allocate(ex_scan, 4);
    allocate(result, 10, true);
    updateDevice(ex_scan, ex_scan_h, 4, stream);
    updateDevice(verify, verify_h, 10, stream);
    csr_to_coo<32>(ex_scan, 4, result, 10, stream);
    std::cout << MLCommon::arr2Str(result, 10, "result", stream) << std::endl;
    ASSERT_TRUE(devArrMatch<int>(verify, result, 10, Compare<float>(), stream));
    std::cout << "Verified!" << std::endl;
    // BUGFIX: arrays allocated with new[] must be released with delete[]
    // (plain delete on them is undefined behavior).
    delete[] ex_scan_h;
    delete[] verify_h;
    CUDA_CHECK(cudaFree(ex_scan));
    CUDA_CHECK(cudaFree(verify));
    CUDA_CHECK(cudaFree(result));
    cudaStreamDestroy(stream);
}
typedef CSRTest<float> CSRRowNormalizeMax;
// Verifies csr_row_normalize_max: every stored value is divided by the
// maximum of its CSR row (row offsets {0,4,8,9} over 10 stored values),
// so e.g. row {5,1,0,0} becomes {1,0.2,0,0}.
TEST_P(CSRRowNormalizeMax, Result) {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    int *ex_scan;
    float *in_vals, *result, *verify;
    int ex_scan_h[4] = {0, 4, 8, 9 };
    float in_vals_h[10] = { 5.0, 1.0, 0.0, 0.0, 10.0, 1.0, 0.0, 0.0, 1.0, 0.0 };
    float verify_h[10] = { 1.0, 0.2, 0.0, 0.0, 1.0, 0.1, 0.0, 0.0, 1, 0.0 };
    allocate(in_vals, 10);
    allocate(verify, 10);
    allocate(ex_scan, 4);
    allocate(result, 10, true);
    updateDevice(ex_scan, *&ex_scan_h, 4, stream);
    updateDevice(in_vals, *&in_vals_h, 10, stream);
    updateDevice(verify, *&verify_h, 10, stream);
    csr_row_normalize_max<32, float>(ex_scan, in_vals, 10, 4, result, stream);
    std::cout << MLCommon::arr2Str(result, 10, "result", stream) << std::endl;
    ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>()));
    cudaStreamDestroy(stream);
    CUDA_CHECK(cudaFree(ex_scan));
    CUDA_CHECK(cudaFree(in_vals));
    CUDA_CHECK(cudaFree(verify));
    CUDA_CHECK(cudaFree(result));
}
typedef CSRTest<float> CSRRowNormalizeL1;
// Verifies csr_row_normalize_l1: every stored value is divided by the L1
// norm (sum of absolute values) of its CSR row, so e.g. row {1,1,0,0}
// becomes {0.5,0.5,0,0}.  Uses the default stream (0) throughout.
TEST_P(CSRRowNormalizeL1, Result) {
    int *ex_scan;
    float *in_vals, *result, *verify;
    int ex_scan_h[4] = {0, 4, 8, 9 };
    float in_vals_h[10] = { 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0 };
    float verify_h[10] = { 0.5, 0.5, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 1, 0.0 };
    allocate(in_vals, 10);
    allocate(verify, 10);
    allocate(ex_scan, 4);
    allocate(result, 10, true);
    updateDevice(ex_scan, *&ex_scan_h, 4, 0);
    updateDevice(in_vals, *&in_vals_h, 10, 0);
    updateDevice(verify, *&verify_h, 10, 0);
    csr_row_normalize_l1<32, float>(ex_scan, in_vals, 10, 4, result, 0);
    cudaDeviceSynchronize();  // ensure the kernel finished before comparing
    ASSERT_TRUE(devArrMatch<float>(verify, result, 10, Compare<float>()));
    CUDA_CHECK(cudaFree(ex_scan));
    CUDA_CHECK(cudaFree(in_vals));
    CUDA_CHECK(cudaFree(verify));
    CUDA_CHECK(cudaFree(result));
}
typedef CSRTest<float> CSRSum;
// Verifies the two-phase CSR addition: csr_add_calc_inds computes the row
// offsets and nnz of the union matrix, csr_add_finalize fills its column
// indices and summed values.
TEST_P(CSRSum, Result) {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    int *ex_scan, *ind_ptr_a, *ind_ptr_b, *verify_indptr;
    float *in_vals_a, *in_vals_b, *verify;
    int ex_scan_h[4] = {0, 4, 8, 9 };
    int indptr_a_h[10] = { 1, 2, 3, 4, 1, 2, 3, 5, 0, 1 };
    int indptr_b_h[10] = { 1, 2, 5, 4, 0, 2, 3, 5, 1, 0 };
    float in_vals_h[10] = { 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0 };
    float verify_h[14] = { 2.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
    int verify_indptr_h[14] = { 1, 2, 3, 4, 5, 1, 2, 3, 5, 0, 0, 1, 1, 0 };
    allocate(in_vals_a, 10);
    allocate(in_vals_b, 10);
    allocate(verify, 14);
    allocate(ex_scan, 4);
    allocate(verify_indptr, 14);
    allocate(ind_ptr_a, 10);
    allocate(ind_ptr_b, 10);
    updateDevice(ex_scan, *&ex_scan_h, 4, stream);
    updateDevice(in_vals_a, *&in_vals_h, 10, stream);
    updateDevice(in_vals_b, *&in_vals_h, 10, stream);
    updateDevice(verify, *&verify_h, 14, stream);
    updateDevice(verify_indptr, *&verify_indptr_h, 14, stream);
    updateDevice(ind_ptr_a, *&indptr_a_h, 10, stream);
    updateDevice(ind_ptr_b, *&indptr_b_h, 10, stream);
    int *result_ind;
    allocate(result_ind, 4);
    // Phase 1: row offsets of the union and total nnz.
    int nnz = csr_add_calc_inds<float, 32>(
        ex_scan, ind_ptr_a, in_vals_a, 10,
        ex_scan, ind_ptr_b, in_vals_b, 10,
        4, result_ind,
        0
    );
    int *result_indptr;
    float *result_val;
    allocate(result_indptr, nnz);
    allocate(result_val, nnz);
    // Phase 2: column indices and summed values.
    csr_add_finalize<float, 32>(
        ex_scan, ind_ptr_a, in_vals_a, 10,
        ex_scan, ind_ptr_b, in_vals_b, 10,
        4, result_ind, result_indptr, result_val,
        0
    );
    ASSERT_TRUE(nnz==14);
    ASSERT_TRUE(devArrMatch<float>(verify, result_val, nnz, Compare<float>()));
    ASSERT_TRUE(devArrMatch<int>(verify_indptr, result_indptr, nnz, Compare<int>()));
    cudaStreamDestroy(stream);
    CUDA_CHECK(cudaFree(ex_scan));
    CUDA_CHECK(cudaFree(in_vals_a));
    CUDA_CHECK(cudaFree(in_vals_b));
    CUDA_CHECK(cudaFree(ind_ptr_a));
    CUDA_CHECK(cudaFree(ind_ptr_b));
    CUDA_CHECK(cudaFree(verify));
    CUDA_CHECK(cudaFree(verify_indptr));  // BUGFIX: was leaked
    CUDA_CHECK(cudaFree(result_ind));     // BUGFIX: was leaked
    CUDA_CHECK(cudaFree(result_indptr));
    CUDA_CHECK(cudaFree(result_val));
}
INSTANTIATE_TEST_CASE_P(CSRTests, CSRToCOO,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeMax,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRRowNormalizeL1,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CSRTests, CSRSum,
::testing::ValuesIn(inputsf));
}}
|
094b5cc70ccd0f1b2d0f1a7c1450357e25528a05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ppmHelper.h"
#include "mp3Helper.h"
#include <bits/stdc++.h>
using namespace std;
#define THREADS_PER_BLOCK 512
// Steganography kernel: hides one audio byte per thread inside the least
// significant bits of eight consecutive image bytes.  For image byte p and
// audio bit b, (p | 1) & (254 + b) sets the LSB when b == 1 and clears it
// when b == 0, leaving the upper 7 bits untouched.  width/height are unused
// (kept for signature compatibility with the host code).
__global__
void encrypt(unsigned char * inputImageData, unsigned char * outputImageData, int width, int height,
char * audioData, long long audioSize) {
    long long index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < audioSize) {
        unsigned char audioByte = (unsigned char)audioData[index];
        // Embed bits MSB-first into bytes index*8 .. index*8+7.
        for (int b = 0; b < 8; ++b) {
            unsigned char bit = (audioByte >> (7 - b)) & 1;
            long long p = index * 8 + b;
            outputImageData[p] = (inputImageData[p] | 1) & (254 + bit);
        }
    }
}
// Hides the bytes of an MP3 file inside the LSBs of a PPM image on the GPU.
// argv[1] = input image, argv[2] = input audio.
int main(int argc, char *argv[]){
    char *inputImageFile = argv[1];
    char *inputAudioFile = argv[2];
    // Events used to time the kernel.
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // Read input image.
    PPMimg *inpImg = readPPM(inputImageFile);
    int width = inpImg->width;
    int height = inpImg->height;
    long long totPixels = (long long)width * height;
    PPMpixel *inData = inpImg->data;
    // NOTE: the unused `outData` host allocation was removed (it was never
    // written or freed).
    unsigned char * inputImageData = ppmTochar(inData, width, height);
    unsigned char * outputImageData = (unsigned char *)malloc(totPixels * 3ll);
    // Copy the input image to device memory.  The output buffer is seeded
    // with the input so bytes carrying no audio bits pass through unchanged.
    unsigned char *d_inputImageData, *d_outputImageData;
    hipMalloc((void**)&d_inputImageData, totPixels * 3ll);
    hipMalloc((void**)&d_outputImageData, totPixels * 3ll);
    hipMemcpy(d_inputImageData,inputImageData,totPixels * 3ll,hipMemcpyHostToDevice);
    hipMemcpy(d_outputImageData,inputImageData,totPixels * 3ll,hipMemcpyHostToDevice);
    // Read the input audio file and copy it to device memory.
    MP3File *inpAudio = readMP3(inputAudioFile);
    char *audioData = inpAudio->data;
    char *d_audioData;
    hipMalloc((void**)&d_audioData, inpAudio->size);
    hipMemcpy(d_audioData, audioData, inpAudio->size, hipMemcpyHostToDevice);
    cout << "Size of audio file = " << inpAudio->size << " bytes ("
         << (inpAudio->size * 8) << " bits)\n";
    cout << "Size of image file = " << totPixels * 3 << " bytes\n";
    // Launch kernel: one thread per audio byte.
    long long audioSize = inpAudio -> size;
    dim3 blockDim(THREADS_PER_BLOCK, 1, 1);
    dim3 gridDim((audioSize-1)/THREADS_PER_BLOCK + 1, 1, 1);
    cout<<"Blocks = "<<(audioSize-1)/THREADS_PER_BLOCK + 1<<"\n";
    hipEventRecord(start);
    // BUGFIX: the grid and block dimensions were passed in swapped order,
    // which made the per-block thread count equal to the number of blocks
    // and failed outright for audio needing more than 1024 blocks.
    hipLaunchKernelGGL(encrypt, gridDim, blockDim, 0, 0,
        d_inputImageData, d_outputImageData, width, height, d_audioData,
        audioSize);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float gpuTime = 0;
    hipEventElapsedTime(&gpuTime, start, stop);
    // Copy the result back to the host and write the output image.
    hipMemcpy(outputImageData, d_outputImageData, totPixels * 3 ,hipMemcpyDeviceToHost);
    char outputImageFile[] = "././Dataset/parallel_output.ppm";
    writePPM(outputImageFile, outputImageData, inpImg->width, inpImg->height, 3);
    // Free memory (BUGFIX: the output host buffer and the timing events
    // used to leak).
    // NOTE(review): inputImageData comes from ppmTochar and inpImg from
    // readPPM -- free them here too once their allocators are confirmed.
    free(audioData);
    free(outputImageData);
    hipFree(d_inputImageData);
    hipFree(d_outputImageData);
    hipFree(d_audioData);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    cout<<"GPU Time taken (encrypt) = "<<gpuTime<<" ms\n";
} | 094b5cc70ccd0f1b2d0f1a7c1450357e25528a05.cu | #include "ppmHelper.h"
#include "mp3Helper.h"
#include <bits/stdc++.h>
using namespace std;
#define THREADS_PER_BLOCK 512
// Steganography kernel: hides one audio byte per thread inside the least
// significant bits of eight consecutive image bytes.  For image byte p and
// audio bit b, (p | 1) & (254 + b) sets the LSB when b == 1 and clears it
// when b == 0, leaving the upper 7 bits untouched.  width/height are unused
// (kept for signature compatibility with the host code).
__global__
void encrypt(unsigned char * inputImageData, unsigned char * outputImageData, int width, int height,
char * audioData, long long audioSize) {
    long long index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < audioSize) {
        unsigned char audioByte = (unsigned char)audioData[index];
        // Embed bits MSB-first into bytes index*8 .. index*8+7.
        for (int b = 0; b < 8; ++b) {
            unsigned char bit = (audioByte >> (7 - b)) & 1;
            long long p = index * 8 + b;
            outputImageData[p] = (inputImageData[p] | 1) & (254 + bit);
        }
    }
}
int main(int argc, char *argv[]){
char *inputImageFile = argv[1];
char *inputAudioFile = argv[2];
// Create Cuda Events //
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
////////////////////////////
// Read input image
PPMimg *inpImg = readPPM(inputImageFile);
int width = inpImg->width;
int height = inpImg->height;
long long totPixels = (long long)width * height;
PPMpixel *inData = inpImg->data;
PPMpixel *outData = (PPMpixel *)malloc(sizeof(PPMpixel) * totPixels);
unsigned char * inputImageData = ppmTochar(inData, width, height);
unsigned char * outputImageData = (unsigned char *)malloc(totPixels * 3ll);
// Copy input image to device memory
unsigned char *d_inputImageData, *d_outputImageData;
cudaMalloc((void**)&d_inputImageData, totPixels * 3ll);
cudaMalloc((void**)&d_outputImageData, totPixels * 3ll);
cudaMemcpy(d_inputImageData,inputImageData,totPixels * 3ll,cudaMemcpyHostToDevice);
cudaMemcpy(d_outputImageData,inputImageData,totPixels * 3ll,cudaMemcpyHostToDevice);
//--------------------------------------------------------------------------//
// Read input audio file
MP3File *inpAudio = readMP3(inputAudioFile);
char *audioData = inpAudio->data;
// Copy audio file to device memory
char *d_audioData;
cudaMalloc((void**)&d_audioData, inpAudio->size);
cudaMemcpy(d_audioData, audioData, inpAudio->size, cudaMemcpyHostToDevice);
//--------------------------------------------------------------------------//
cout << "Size of audio file = " << inpAudio->size << " bytes ("
<< (inpAudio->size * 8) << " bits)\n";
cout << "Size of image file = " << totPixels * 3 << " bytes\n";
// Invoke Kernel
long long audioSize = inpAudio -> size;
dim3 blockDim(THREADS_PER_BLOCK, 1, 1);
dim3 gridDim((audioSize-1)/THREADS_PER_BLOCK + 1, 1, 1);
cout<<"Blocks = "<<(audioSize-1)/THREADS_PER_BLOCK + 1<<"\n";
cudaEventRecord(start);
encrypt<<<blockDim, gridDim>>>(d_inputImageData, d_outputImageData, width, height, d_audioData,
audioSize);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float gpuTime = 0;
cudaEventElapsedTime(&gpuTime, start, stop);
//--------------------------------------------------------------------------//
// Writing result to host
cudaMemcpy(outputImageData, d_outputImageData, totPixels * 3 ,cudaMemcpyDeviceToHost);
// Writing back output image
char outputImageFile[] = "././Dataset/parallel_output.ppm";
writePPM(outputImageFile, outputImageData, inpImg->width, inpImg->height, 3);
//--------------------------------------------------------------------------//
// Free memory
free(audioData);
cudaFree(d_inputImageData);
cudaFree(d_outputImageData);
cudaFree(d_audioData);
// Time Print
cout<<"GPU Time taken (encrypt) = "<<gpuTime<<" ms\n";
} |
4c16bcc62d8d8de070c315cf49c46691d793b7fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include "../common/stopwatch.h"
void initialData1(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (int)(rand() % 1000)+1;
}
return;
}
void initialData2(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (int)(rand() % 80)+1;
}
return;
}
int maxCPU(int a, int b) { return (a>b)? a : b;}
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%d ", in[i]);
}
printf("\n");
return;
}
int KPCPU1 (int *v, int *w, int *dp, int N, int W)
{
for(int i=0;i<N;i++)
{
for(int j=W; j>=w[i]; j--)
dp[j]=maxCPU(dp[j],dp[j-w[i]]+v[i]);
}
return dp[W];
}
int KPCPU2 (int *v, int *w, int **dp, int N, int W)
{
for(int i=0;i<=N;i++)
dp[i][0]=0;
for(int i=0;i<=W;i++)
dp[0][i]=0;
for(int i=1;i<=N;i++)
{
for(int j=1; j<=W;j++)
{
if(j<w[i-1])
dp[i][j]=dp[i-1][j];
else
dp[i][j]=maxCPU(dp[i-1][j],dp[i-1][j-w[i-1]]+v[i-1]);
//printf("%d ",dp[i][j]);
}
//printf("\n");
}
return dp[N][W];
}
__device__ int maxGPU (int a, int b) { return (a>b)? a : b;}
__global__ void KPGPU(int *prev, int *tmp,int *w, int *v,int N, int W, int i)
{
unsigned int j=blockIdx.x*blockDim.x+threadIdx.x;
if(j<=W)
{
if(j<w[i])
tmp[j]=prev[j];
else
tmp[j]=maxGPU(prev[j],prev[j-w[i]]+v[i]);
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting main at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int B=512;
int W=400,N=100000;
if (argc > 1) B = atoi(argv[1]);
if (argc > 2) N = atoi(argv[2]);
if (argc > 3) W = atoi(argv[2]);
dim3 block (B);
dim3 grid ((W + block.x - 1) / block.x);
int *h_DP, *h_v, *h_w,**h_mat,*h_res;
int *d_v, *d_w,*d_prev,*d_tmp;
size_t nBytes = (W+1) * sizeof(int);
h_DP = (int *)malloc(nBytes);
h_res = (int *)malloc(nBytes);
hipMalloc((int**)&d_prev, nBytes);
hipMalloc((int**)&d_tmp, nBytes);
hipMemset(d_prev,0,nBytes);
hipMemset(d_tmp,0,nBytes);
h_mat=(int**)malloc((N+1)*sizeof(int*));
for(int i=0;i<=N;i++)
h_mat[i]=(int*)malloc((nBytes));
memset(h_DP,0,nBytes);
nBytes = N*sizeof(int);
h_v = (int *)malloc(nBytes);
h_w = (int *)malloc(nBytes);
initialData1(h_v,N);
initialData2(h_w,N);
hipMalloc((int**)&d_v, nBytes);
hipMalloc((int**)&d_w, nBytes);
CHECK(hipMemcpy(d_v, h_v, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_w, h_w, nBytes, hipMemcpyHostToDevice));
Stopwatch s;
printf("%d\n",KPCPU1(h_v,h_w,h_DP,N,W));
printf("KPCPU1 elapsed %f sec \n",s.elapsed());
s.reset();
printf("%d",KPCPU2(h_v,h_w,h_mat,N,W));
printf("\nKPCPU2 elapsed %f sec \n\n",s.elapsed());
s.reset();
for(int i=0;i<=N;i++)
{
hipLaunchKernelGGL(( KPGPU), dim3(grid),dim3(block), 0, 0, d_prev,d_tmp,d_w,d_v,N,W,i);
CHECK(hipStreamSynchronize(0));
CHECK(hipGetLastError());
int *t=d_prev;
d_prev=d_tmp;
d_tmp=t;
}
printf("\n%d * KPGPU<<<%d,%d>>> elapsed %f sec \n",N,grid.x,block.x,s.elapsed());
CHECK(hipMemcpy(h_res, d_prev, (W+1) *sizeof(int), hipMemcpyDeviceToHost));
printf("%d\n",h_res[W]);
for(int i=0;i<=N;i++)
free(h_mat[i]);
free(h_mat);
free(h_DP);
free(h_v);
free(h_w);
hipFree(d_v);
hipFree(d_w);
hipFree(d_prev);
hipFree(d_tmp);
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
/*
a starting main at device 0: GeForce GTX 1050
327704
KPCPU1 elapsed 0.232883 sec
327704
KPCPU2 elapsed 0.371544 sec
100000 * KPGPU<<<13,32>>> elapsed 9.027132 sec
327704
==6696== Profiling application: a 32
==6696== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 99.90% 123.64ms 100001 1.2360us 1.1830us 2.5920us KPGPU(int*, int*, int*, int*, int, int, int)
0.10% 122.53us 2 61.264us 61.184us 61.344us [CUDA memcpy HtoD]
0.00% 2.2720us 2 1.1360us 800ns 1.4720us [CUDA memset]
0.00% 1.3760us 1 1.3760us 1.3760us 1.3760us [CUDA memcpy DtoH]
API calls: 79.55% 7.22795s 100001 72.278us 23.400us 1.7266ms hipStreamSynchronize
17.01% 1.54571s 100001 15.456us 13.600us 1.2323ms cudaLaunchKernel
2.38% 216.29ms 4 54.072ms 9.4000us 216.22ms hipMalloc
0.66% 59.521ms 1 59.521ms 59.521ms 59.521ms hipDeviceReset
0.38% 34.281ms 100001 342ns 200ns 281.30us hipGetLastError
0.01% 723.80us 3 241.27us 143.10us 352.40us hipMemcpy
0.01% 717.50us 97 7.3960us 200ns 315.70us hipDeviceGetAttribute
0.01% 602.80us 1 602.80us 602.80us 602.80us hipGetDeviceProperties
0.00% 397.10us 4 99.275us 24.900us 255.60us hipFree
0.00% 45.100us 1 45.100us 45.100us 45.100us cuDeviceTotalMem
0.00% 31.000us 2 15.500us 5.0000us 26.000us hipMemset
0.00% 13.500us 1 13.500us 13.500us 13.500us hipSetDevice
0.00% 13.400us 1 13.400us 13.400us 13.400us hipDeviceGetPCIBusId
0.00% 4.6000us 2 2.3000us 400ns 4.2000us hipDeviceGet
0.00% 2.3000us 3 766ns 300ns 1.0000us hipGetDeviceCount
0.00% 1.6000us 1 1.6000us 1.6000us 1.6000us hipDeviceGetName
0.00% 700ns 1 700ns 700ns 700ns hipDeviceGetUuid
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetLuid
a starting main at device 0: GeForce GTX 1050
327704
KPCPU1 elapsed 0.249529 sec
327704
KPCPU2 elapsed 0.389207 sec
100000 * KPGPU<<<2,256>>> elapsed 9.194475 sec
327704
==6328== Profiling application: a 256
==6328== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 99.90% 122.36ms 100001 1.2230us 1.1520us 2.4320us KPGPU(int*, int*, int*, int*, int, int, int)
0.10% 122.62us 2 61.312us 61.152us 61.472us [CUDA memcpy HtoD]
0.00% 2.2400us 2 1.1200us 800ns 1.4400us [CUDA memset]
0.00% 1.3440us 1 1.3440us 1.3440us 1.3440us [CUDA memcpy DtoH]
API calls: 79.55% 7.36634s 100001 73.662us 26.800us 5.2921ms hipStreamSynchronize
16.96% 1.57101s 100001 15.709us 13.500us 415.40us cudaLaunchKernel
2.46% 227.53ms 4 56.884ms 9.1000us 227.47ms hipMalloc
0.63% 58.153ms 1 58.153ms 58.153ms 58.153ms hipDeviceReset
0.37% 34.661ms 100001 346ns 200ns 231.40us hipGetLastError
0.01% 798.40us 3 266.13us 198.20us 357.20us hipMemcpy
0.01% 656.90us 1 656.90us 656.90us 656.90us hipGetDeviceProperties
0.01% 609.30us 97 6.2810us 200ns 257.00us hipDeviceGetAttribute
0.01% 475.50us 4 118.88us 24.500us 304.00us hipFree
0.00% 41.500us 1 41.500us 41.500us 41.500us cuDeviceTotalMem
0.00% 27.200us 2 13.600us 5.3000us 21.900us hipMemset
0.00% 18.900us 1 18.900us 18.900us 18.900us hipSetDevice
0.00% 13.700us 1 13.700us 13.700us 13.700us hipDeviceGetPCIBusId
0.00% 4.3000us 2 2.1500us 400ns 3.9000us hipDeviceGet
0.00% 2.8000us 3 933ns 300ns 1.5000us hipGetDeviceCount
0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us hipDeviceGetName
0.00% 600ns 1 600ns 600ns 600ns hipDeviceGetUuid
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetLuid
a starting main at device 0: GeForce GTX 1050
327704
KPCPU1 elapsed 0.233630 sec
327704
KPCPU2 elapsed 0.370733 sec
100000 * KPGPU<<<1,512>>> elapsed 9.369431 sec
327704
==4884== Profiling application: a
==4884== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 99.90% 125.31ms 100001 1.2530us 1.1830us 6.6870us KPGPU(int*, int*, int*, int*, int, int, int)
0.10% 122.14us 2 61.072us 61.024us 61.120us [CUDA memcpy HtoD]
0.00% 2.3040us 2 1.1520us 800ns 1.5040us [CUDA memset]
0.00% 1.5680us 1 1.5680us 1.5680us 1.5680us [CUDA memcpy DtoH]
API calls: 79.02% 7.44478s 100001 74.447us 23.200us 12.277ms hipStreamSynchronize
17.66% 1.66368s 100001 16.636us 13.500us 6.9754ms cudaLaunchKernel
2.26% 212.78ms 4 53.194ms 9.9000us 212.71ms hipMalloc
0.64% 60.005ms 1 60.005ms 60.005ms 60.005ms hipDeviceReset
0.39% 36.685ms 100001 366ns 200ns 1.0904ms hipGetLastError
0.01% 1.0136ms 3 337.87us 213.00us 437.30us hipMemcpy
0.01% 681.70us 97 7.0270us 200ns 304.50us hipDeviceGetAttribute
0.01% 602.70us 1 602.70us 602.70us 602.70us hipGetDeviceProperties
0.00% 470.60us 4 117.65us 25.200us 297.80us hipFree
0.00% 39.200us 1 39.200us 39.200us 39.200us cuDeviceTotalMem
0.00% 26.800us 2 13.400us 5.1000us 21.700us hipMemset
0.00% 13.500us 1 13.500us 13.500us 13.500us hipDeviceGetPCIBusId
0.00% 12.500us 1 12.500us 12.500us 12.500us hipSetDevice
0.00% 4.2000us 2 2.1000us 500ns 3.7000us hipDeviceGet
0.00% 2.2000us 3 733ns 300ns 1.1000us hipGetDeviceCount
0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us hipDeviceGetName
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetLuid
0.00% 400ns 1 400ns 400ns 400ns hipDeviceGetUuid
*/ | 4c16bcc62d8d8de070c315cf49c46691d793b7fe.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include "../common/stopwatch.h"
void initialData1(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (int)(rand() % 1000)+1;
}
return;
}
void initialData2(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (int)(rand() % 80)+1;
}
return;
}
int maxCPU(int a, int b) { return (a>b)? a : b;}
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%d ", in[i]);
}
printf("\n");
return;
}
int KPCPU1 (int *v, int *w, int *dp, int N, int W)
{
for(int i=0;i<N;i++)
{
for(int j=W; j>=w[i]; j--)
dp[j]=maxCPU(dp[j],dp[j-w[i]]+v[i]);
}
return dp[W];
}
int KPCPU2 (int *v, int *w, int **dp, int N, int W)
{
for(int i=0;i<=N;i++)
dp[i][0]=0;
for(int i=0;i<=W;i++)
dp[0][i]=0;
for(int i=1;i<=N;i++)
{
for(int j=1; j<=W;j++)
{
if(j<w[i-1])
dp[i][j]=dp[i-1][j];
else
dp[i][j]=maxCPU(dp[i-1][j],dp[i-1][j-w[i-1]]+v[i-1]);
//printf("%d ",dp[i][j]);
}
//printf("\n");
}
return dp[N][W];
}
__device__ int maxGPU (int a, int b) { return (a>b)? a : b;}
__global__ void KPGPU(int *prev, int *tmp,int *w, int *v,int N, int W, int i)
{
unsigned int j=blockIdx.x*blockDim.x+threadIdx.x;
if(j<=W)
{
if(j<w[i])
tmp[j]=prev[j];
else
tmp[j]=maxGPU(prev[j],prev[j-w[i]]+v[i]);
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting main at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int B=512;
int W=400,N=100000;
if (argc > 1) B = atoi(argv[1]);
if (argc > 2) N = atoi(argv[2]);
if (argc > 3) W = atoi(argv[2]);
dim3 block (B);
dim3 grid ((W + block.x - 1) / block.x);
int *h_DP, *h_v, *h_w,**h_mat,*h_res;
int *d_v, *d_w,*d_prev,*d_tmp;
size_t nBytes = (W+1) * sizeof(int);
h_DP = (int *)malloc(nBytes);
h_res = (int *)malloc(nBytes);
cudaMalloc((int**)&d_prev, nBytes);
cudaMalloc((int**)&d_tmp, nBytes);
cudaMemset(d_prev,0,nBytes);
cudaMemset(d_tmp,0,nBytes);
h_mat=(int**)malloc((N+1)*sizeof(int*));
for(int i=0;i<=N;i++)
h_mat[i]=(int*)malloc((nBytes));
memset(h_DP,0,nBytes);
nBytes = N*sizeof(int);
h_v = (int *)malloc(nBytes);
h_w = (int *)malloc(nBytes);
initialData1(h_v,N);
initialData2(h_w,N);
cudaMalloc((int**)&d_v, nBytes);
cudaMalloc((int**)&d_w, nBytes);
CHECK(cudaMemcpy(d_v, h_v, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_w, h_w, nBytes, cudaMemcpyHostToDevice));
Stopwatch s;
printf("%d\n",KPCPU1(h_v,h_w,h_DP,N,W));
printf("KPCPU1 elapsed %f sec \n",s.elapsed());
s.reset();
printf("%d",KPCPU2(h_v,h_w,h_mat,N,W));
printf("\nKPCPU2 elapsed %f sec \n\n",s.elapsed());
s.reset();
for(int i=0;i<=N;i++)
{
KPGPU<<<grid,block>>>(d_prev,d_tmp,d_w,d_v,N,W,i);
CHECK(cudaStreamSynchronize(0));
CHECK(cudaGetLastError());
int *t=d_prev;
d_prev=d_tmp;
d_tmp=t;
}
printf("\n%d * KPGPU<<<%d,%d>>> elapsed %f sec \n",N,grid.x,block.x,s.elapsed());
CHECK(cudaMemcpy(h_res, d_prev, (W+1) *sizeof(int), cudaMemcpyDeviceToHost));
printf("%d\n",h_res[W]);
for(int i=0;i<=N;i++)
free(h_mat[i]);
free(h_mat);
free(h_DP);
free(h_v);
free(h_w);
cudaFree(d_v);
cudaFree(d_w);
cudaFree(d_prev);
cudaFree(d_tmp);
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
/*
a starting main at device 0: GeForce GTX 1050
327704
KPCPU1 elapsed 0.232883 sec
327704
KPCPU2 elapsed 0.371544 sec
100000 * KPGPU<<<13,32>>> elapsed 9.027132 sec
327704
==6696== Profiling application: a 32
==6696== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 99.90% 123.64ms 100001 1.2360us 1.1830us 2.5920us KPGPU(int*, int*, int*, int*, int, int, int)
0.10% 122.53us 2 61.264us 61.184us 61.344us [CUDA memcpy HtoD]
0.00% 2.2720us 2 1.1360us 800ns 1.4720us [CUDA memset]
0.00% 1.3760us 1 1.3760us 1.3760us 1.3760us [CUDA memcpy DtoH]
API calls: 79.55% 7.22795s 100001 72.278us 23.400us 1.7266ms cudaStreamSynchronize
17.01% 1.54571s 100001 15.456us 13.600us 1.2323ms cudaLaunchKernel
2.38% 216.29ms 4 54.072ms 9.4000us 216.22ms cudaMalloc
0.66% 59.521ms 1 59.521ms 59.521ms 59.521ms cudaDeviceReset
0.38% 34.281ms 100001 342ns 200ns 281.30us cudaGetLastError
0.01% 723.80us 3 241.27us 143.10us 352.40us cudaMemcpy
0.01% 717.50us 97 7.3960us 200ns 315.70us cuDeviceGetAttribute
0.01% 602.80us 1 602.80us 602.80us 602.80us cudaGetDeviceProperties
0.00% 397.10us 4 99.275us 24.900us 255.60us cudaFree
0.00% 45.100us 1 45.100us 45.100us 45.100us cuDeviceTotalMem
0.00% 31.000us 2 15.500us 5.0000us 26.000us cudaMemset
0.00% 13.500us 1 13.500us 13.500us 13.500us cudaSetDevice
0.00% 13.400us 1 13.400us 13.400us 13.400us cuDeviceGetPCIBusId
0.00% 4.6000us 2 2.3000us 400ns 4.2000us cuDeviceGet
0.00% 2.3000us 3 766ns 300ns 1.0000us cuDeviceGetCount
0.00% 1.6000us 1 1.6000us 1.6000us 1.6000us cuDeviceGetName
0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetUuid
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetLuid
a starting main at device 0: GeForce GTX 1050
327704
KPCPU1 elapsed 0.249529 sec
327704
KPCPU2 elapsed 0.389207 sec
100000 * KPGPU<<<2,256>>> elapsed 9.194475 sec
327704
==6328== Profiling application: a 256
==6328== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 99.90% 122.36ms 100001 1.2230us 1.1520us 2.4320us KPGPU(int*, int*, int*, int*, int, int, int)
0.10% 122.62us 2 61.312us 61.152us 61.472us [CUDA memcpy HtoD]
0.00% 2.2400us 2 1.1200us 800ns 1.4400us [CUDA memset]
0.00% 1.3440us 1 1.3440us 1.3440us 1.3440us [CUDA memcpy DtoH]
API calls: 79.55% 7.36634s 100001 73.662us 26.800us 5.2921ms cudaStreamSynchronize
16.96% 1.57101s 100001 15.709us 13.500us 415.40us cudaLaunchKernel
2.46% 227.53ms 4 56.884ms 9.1000us 227.47ms cudaMalloc
0.63% 58.153ms 1 58.153ms 58.153ms 58.153ms cudaDeviceReset
0.37% 34.661ms 100001 346ns 200ns 231.40us cudaGetLastError
0.01% 798.40us 3 266.13us 198.20us 357.20us cudaMemcpy
0.01% 656.90us 1 656.90us 656.90us 656.90us cudaGetDeviceProperties
0.01% 609.30us 97 6.2810us 200ns 257.00us cuDeviceGetAttribute
0.01% 475.50us 4 118.88us 24.500us 304.00us cudaFree
0.00% 41.500us 1 41.500us 41.500us 41.500us cuDeviceTotalMem
0.00% 27.200us 2 13.600us 5.3000us 21.900us cudaMemset
0.00% 18.900us 1 18.900us 18.900us 18.900us cudaSetDevice
0.00% 13.700us 1 13.700us 13.700us 13.700us cuDeviceGetPCIBusId
0.00% 4.3000us 2 2.1500us 400ns 3.9000us cuDeviceGet
0.00% 2.8000us 3 933ns 300ns 1.5000us cuDeviceGetCount
0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us cuDeviceGetName
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetUuid
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetLuid
a starting main at device 0: GeForce GTX 1050
327704
KPCPU1 elapsed 0.233630 sec
327704
KPCPU2 elapsed 0.370733 sec
100000 * KPGPU<<<1,512>>> elapsed 9.369431 sec
327704
==4884== Profiling application: a
==4884== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 99.90% 125.31ms 100001 1.2530us 1.1830us 6.6870us KPGPU(int*, int*, int*, int*, int, int, int)
0.10% 122.14us 2 61.072us 61.024us 61.120us [CUDA memcpy HtoD]
0.00% 2.3040us 2 1.1520us 800ns 1.5040us [CUDA memset]
0.00% 1.5680us 1 1.5680us 1.5680us 1.5680us [CUDA memcpy DtoH]
API calls: 79.02% 7.44478s 100001 74.447us 23.200us 12.277ms cudaStreamSynchronize
17.66% 1.66368s 100001 16.636us 13.500us 6.9754ms cudaLaunchKernel
2.26% 212.78ms 4 53.194ms 9.9000us 212.71ms cudaMalloc
0.64% 60.005ms 1 60.005ms 60.005ms 60.005ms cudaDeviceReset
0.39% 36.685ms 100001 366ns 200ns 1.0904ms cudaGetLastError
0.01% 1.0136ms 3 337.87us 213.00us 437.30us cudaMemcpy
0.01% 681.70us 97 7.0270us 200ns 304.50us cuDeviceGetAttribute
0.01% 602.70us 1 602.70us 602.70us 602.70us cudaGetDeviceProperties
0.00% 470.60us 4 117.65us 25.200us 297.80us cudaFree
0.00% 39.200us 1 39.200us 39.200us 39.200us cuDeviceTotalMem
0.00% 26.800us 2 13.400us 5.1000us 21.700us cudaMemset
0.00% 13.500us 1 13.500us 13.500us 13.500us cuDeviceGetPCIBusId
0.00% 12.500us 1 12.500us 12.500us 12.500us cudaSetDevice
0.00% 4.2000us 2 2.1000us 500ns 3.7000us cuDeviceGet
0.00% 2.2000us 3 733ns 300ns 1.1000us cuDeviceGetCount
0.00% 1.4000us 1 1.4000us 1.4000us 1.4000us cuDeviceGetName
0.00% 600ns 1 600ns 600ns 600ns cuDeviceGetLuid
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetUuid
*/ |
c63527666e990167737031d88c66c4d593571287.hip | // !!! This is a file automatically generated by hipify!!!
/*Author: Rodrigo Gonalves de Branco
Date: 12/01/2017
*/
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cmath>
#include <climits>
#include <stdio.h>
#include <omp.h>
#include <thrust/device_vector.h>
#include "cuda_util.h"
using namespace std;
__global__
void prefixsumJAxis(int* v, int N)
{
int sqrN = N*N;
for(int k = blockIdx.x; k < N; k += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int j = 1; j < N; j++) {
v[sqrN*k + N*i + j] += v[sqrN*k + N*i + j-1];
}
}
}
}
__global__
void prefixsumKAxis(int* v, int N)
{
int sqrN = N*N;
for(int j = blockIdx.x; j < N; j += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int k = 1; k < N; k++) {
v[sqrN*k + N*i + j] += v[sqrN*(k-1) + N*i + j];
}
}
}
}
__device__ inline
int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__ inline
int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__device__ inline
int maxSubArraySum(int* v, int N, int g, int h, int r, int t) {
int max_so_far = 0, max_ending_here = 0;
int sqrN = N*N;
for(int i = 0; i < N; i++)
{
int tmp1 = v[sqrN*t + N*i + h];
int tmp2 = r > 0 ? v[sqrN*(r-1) + N*i + h] : 0;
int tmp3 = g > 0 ? v[sqrN*t + N*i + (g-1)] : 0;
//Maybe repeated elements were subtracted. If that is true, we need correct it!
int tmp4 = r > 0 && g > 0 ? v[sqrN*(r-1) + N*i + (g-1)] : 0 ;
int temp = tmp1 - tmp2 - tmp3 + tmp4;
//printf("g:%d h:%d r:%d t:%d => %d - %d - %d + %d = %d\n",g,h,r,t,tmp1,tmp2,tmp3,tmp4,temp);
max_ending_here = max_ending_here + temp;
if(max_ending_here < 0)
max_ending_here = 0;
if(max_so_far < max_ending_here)
max_so_far = max_ending_here;
}
return max_so_far;
}
__global__
void computeCghrt(int* v, int N, int * result, int dev_id, int devCount, int computationSize, int totalComp)
{
int maxsofar = INT_MIN;
//to cover all R e T index
//printf("blk:%d thd:%d gridDim:%d blockDim:%d\n",blockIdx.x,threadIdx.x,gridDim.x,blockDim.x);
extern __shared__ int max_block[];
if(threadIdx.x == 0)
max_block[0] = INT_MIN;
__syncthreads();
for(int blkstep = 0; blkstep < computationSize; blkstep += gridDim.x) {
int blkstep_dev = blkstep + dev_id*computationSize;
int r = row_index(blockIdx.x + blkstep_dev,N);
int t = column_index(blockIdx.x + blkstep_dev,N);
if(r >= 0 && t >= 0 && r < N && t < N && r <= t) {
//to cover all G e H index
for(int thdstep = 0; thdstep < totalComp; thdstep += blockDim.x) {
int g = row_index(threadIdx.x + thdstep,N);
int h = column_index(threadIdx.x + thdstep,N);
if(g >= 0 && h >= 0 && g < N && h < N && g <= h) {
int newmax = maxSubArraySum(v,N,g,h,r,t);
maxsofar = newmax > maxsofar ? newmax : maxsofar;
}
}
}
}
//atomicMax(result,maxsofar);
atomicMax(&max_block[0],maxsofar);
__syncthreads();
if(threadIdx.x == 0)
atomicMax(&result[0],max_block[0]);
}
/*void print(int* v, int N) {
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cout<<v[N*N*k + N*i + j]<<" ";
}
cout<<endl;
}
cout<<endl;
}
}*/
int main() {
//size of cube
int N;
cin>>N;
//cube representation: O(n^3) of space
int* cube = (int*)malloc(N*N*N*sizeof(int**));
//Reading the values
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cin>>cube[N*N*k + N*i + j];
}
}
}
//cout<<"original:"<<endl;
//print(cube,N);
int devCount;
HANDLE_ERROR( hipGetDeviceCount(&devCount));
thrust::host_vector<int> max_device(devCount);
int global_max = -1;
#pragma omp parallel num_threads(devCount) default(shared)
{
const int dev_id = omp_get_thread_num();
HANDLE_ERROR( hipSetDevice(dev_id) );
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, dev_id);
int* dcube;
HANDLE_ERROR( hipMalloc( (void**)&dcube, N*N*N*sizeof(int)));
HANDLE_ERROR( hipMemcpy( dcube, cube, N*N*N*sizeof(int),hipMemcpyHostToDevice ) );
thrust::device_vector<int> d_result(1,INT_MIN);
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
dim3 dimThreads(256);
dim3 dimBlocks(32*numSMs);
hipEvent_t start,stop;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
HANDLE_ERROR( hipEventRecord(start, 0) );
hipLaunchKernelGGL(( prefixsumJAxis), dim3(dimBlocks),dim3(dimThreads), 0, 0, dcube,N);
HANDLE_ERROR( hipDeviceSynchronize() );
//HANDLE_ERROR( hipMemcpy( cube, dcube,N*N*N*sizeof(int),hipMemcpyDeviceToHost));
//cout<<"first ps:"<<endl;
//print(cube,N);
hipLaunchKernelGGL(( prefixsumKAxis), dim3(dimBlocks),dim3(dimThreads), 0, 0, dcube,N);
HANDLE_ERROR( hipDeviceSynchronize() );
//cout<<endl<<"second ps:"<<endl;
//HANDLE_ERROR( hipMemcpy( cube, dcube,N*N*N*sizeof(int),hipMemcpyDeviceToHost));
//print(cube,N);
//cout<<"computation size: "<<N*(N+1)/2<<endl;
int totalComp = ((N*(N+1))>>1);
int computationSize = (int)(totalComp/devCount);
if(totalComp % devCount != 0) {
computationSize++;
}
hipLaunchKernelGGL(( computeCghrt), dim3(dimBlocks),dim3(dimThreads), sizeof(int), 0, dcube,N,thrust::raw_pointer_cast(d_result.data()),dev_id,devCount,computationSize,totalComp);
HANDLE_ERROR( hipDeviceSynchronize() );
max_device[dev_id] = d_result[0];
#pragma omp barrier
#pragma omp single
{
for(int i = 0; i < devCount; i++) {
if(global_max < max_device[i])
global_max = max_device[i];
}
}
HANDLE_ERROR( hipEventRecord(stop, 0) );
HANDLE_ERROR( hipEventSynchronize(start) );
HANDLE_ERROR( hipEventSynchronize(stop) );
float elapsedtime;
HANDLE_ERROR( hipEventElapsedTime(&elapsedtime, start, stop) );
//int result;
//HANDLE_ERROR( hipMemcpy( &result, dresult, sizeof(int),hipMemcpyDeviceToHost));
hipFree(dcube);
//hipFree(dresult);
//cout<<result<<endl;
//printf("%i %.9f\n",result,elapsedtime);
//printf("%.9f\n",elapsedtime);
#pragma omp single
{
//printf("\nO resultado e: %d\n",global_max);
//printf("O tempo foi de: %.9f ms para a mmax2d\n", elapsedtime);
printf("%d %.9f\n",global_max,elapsedtime);
//printf("%.9f\n",elapsedtime);
}
}
free(cube);
return 0;
}
| c63527666e990167737031d88c66c4d593571287.cu | /*Author: Rodrigo Gonçalves de Branco
Date: 12/01/2017
*/
#include <iostream>
#include <vector>
#include <cuda.h>
#include <cstdio>
#include <cmath>
#include <climits>
#include <stdio.h>
#include <omp.h>
#include <thrust/device_vector.h>
#include "cuda_util.h"
using namespace std;
__global__
void prefixsumJAxis(int* v, int N)
{
int sqrN = N*N;
for(int k = blockIdx.x; k < N; k += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int j = 1; j < N; j++) {
v[sqrN*k + N*i + j] += v[sqrN*k + N*i + j-1];
}
}
}
}
__global__
void prefixsumKAxis(int* v, int N)
{
int sqrN = N*N;
for(int j = blockIdx.x; j < N; j += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int k = 1; k < N; k++) {
v[sqrN*k + N*i + j] += v[sqrN*(k-1) + N*i + j];
}
}
}
}
__device__ inline
int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__ inline
int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__device__ inline
int maxSubArraySum(int* v, int N, int g, int h, int r, int t) {
int max_so_far = 0, max_ending_here = 0;
int sqrN = N*N;
for(int i = 0; i < N; i++)
{
int tmp1 = v[sqrN*t + N*i + h];
int tmp2 = r > 0 ? v[sqrN*(r-1) + N*i + h] : 0;
int tmp3 = g > 0 ? v[sqrN*t + N*i + (g-1)] : 0;
//Maybe repeated elements were subtracted. If that is true, we need correct it!
int tmp4 = r > 0 && g > 0 ? v[sqrN*(r-1) + N*i + (g-1)] : 0 ;
int temp = tmp1 - tmp2 - tmp3 + tmp4;
//printf("g:%d h:%d r:%d t:%d => %d - %d - %d + %d = %d\n",g,h,r,t,tmp1,tmp2,tmp3,tmp4,temp);
max_ending_here = max_ending_here + temp;
if(max_ending_here < 0)
max_ending_here = 0;
if(max_so_far < max_ending_here)
max_so_far = max_ending_here;
}
return max_so_far;
}
__global__
void computeCghrt(int* v, int N, int * result, int dev_id, int devCount, int computationSize, int totalComp)
{
int maxsofar = INT_MIN;
//to cover all R e T index
//printf("blk:%d thd:%d gridDim:%d blockDim:%d\n",blockIdx.x,threadIdx.x,gridDim.x,blockDim.x);
extern __shared__ int max_block[];
if(threadIdx.x == 0)
max_block[0] = INT_MIN;
__syncthreads();
for(int blkstep = 0; blkstep < computationSize; blkstep += gridDim.x) {
int blkstep_dev = blkstep + dev_id*computationSize;
int r = row_index(blockIdx.x + blkstep_dev,N);
int t = column_index(blockIdx.x + blkstep_dev,N);
if(r >= 0 && t >= 0 && r < N && t < N && r <= t) {
//to cover all G e H index
for(int thdstep = 0; thdstep < totalComp; thdstep += blockDim.x) {
int g = row_index(threadIdx.x + thdstep,N);
int h = column_index(threadIdx.x + thdstep,N);
if(g >= 0 && h >= 0 && g < N && h < N && g <= h) {
int newmax = maxSubArraySum(v,N,g,h,r,t);
maxsofar = newmax > maxsofar ? newmax : maxsofar;
}
}
}
}
//atomicMax(result,maxsofar);
atomicMax(&max_block[0],maxsofar);
__syncthreads();
if(threadIdx.x == 0)
atomicMax(&result[0],max_block[0]);
}
/*void print(int* v, int N) {
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cout<<v[N*N*k + N*i + j]<<" ";
}
cout<<endl;
}
cout<<endl;
}
}*/
int main() {
//size of cube
int N;
cin>>N;
//cube representation: O(n^3) of space
int* cube = (int*)malloc(N*N*N*sizeof(int**));
//Reading the values
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cin>>cube[N*N*k + N*i + j];
}
}
}
//cout<<"original:"<<endl;
//print(cube,N);
int devCount;
HANDLE_ERROR( cudaGetDeviceCount(&devCount));
thrust::host_vector<int> max_device(devCount);
int global_max = -1;
#pragma omp parallel num_threads(devCount) default(shared)
{
const int dev_id = omp_get_thread_num();
HANDLE_ERROR( cudaSetDevice(dev_id) );
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, dev_id);
int* dcube;
HANDLE_ERROR( cudaMalloc( (void**)&dcube, N*N*N*sizeof(int)));
HANDLE_ERROR( cudaMemcpy( dcube, cube, N*N*N*sizeof(int),cudaMemcpyHostToDevice ) );
thrust::device_vector<int> d_result(1,INT_MIN);
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
dim3 dimThreads(256);
dim3 dimBlocks(32*numSMs);
cudaEvent_t start,stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start, 0) );
prefixsumJAxis<<<dimBlocks,dimThreads>>>(dcube,N);
HANDLE_ERROR( cudaThreadSynchronize() );
//HANDLE_ERROR( cudaMemcpy( cube, dcube,N*N*N*sizeof(int),cudaMemcpyDeviceToHost));
//cout<<"first ps:"<<endl;
//print(cube,N);
prefixsumKAxis<<<dimBlocks,dimThreads>>>(dcube,N);
HANDLE_ERROR( cudaThreadSynchronize() );
//cout<<endl<<"second ps:"<<endl;
//HANDLE_ERROR( cudaMemcpy( cube, dcube,N*N*N*sizeof(int),cudaMemcpyDeviceToHost));
//print(cube,N);
//cout<<"computation size: "<<N*(N+1)/2<<endl;
int totalComp = ((N*(N+1))>>1);
int computationSize = (int)(totalComp/devCount);
if(totalComp % devCount != 0) {
computationSize++;
}
computeCghrt<<<dimBlocks,dimThreads, sizeof(int)>>>(dcube,N,thrust::raw_pointer_cast(d_result.data()),dev_id,devCount,computationSize,totalComp);
HANDLE_ERROR( cudaThreadSynchronize() );
max_device[dev_id] = d_result[0];
#pragma omp barrier
#pragma omp single
{
for(int i = 0; i < devCount; i++) {
if(global_max < max_device[i])
global_max = max_device[i];
}
}
HANDLE_ERROR( cudaEventRecord(stop, 0) );
HANDLE_ERROR( cudaEventSynchronize(start) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
float elapsedtime;
HANDLE_ERROR( cudaEventElapsedTime(&elapsedtime, start, stop) );
//int result;
//HANDLE_ERROR( cudaMemcpy( &result, dresult, sizeof(int),cudaMemcpyDeviceToHost));
cudaFree(dcube);
//cudaFree(dresult);
//cout<<result<<endl;
//printf("%i %.9f\n",result,elapsedtime);
//printf("%.9f\n",elapsedtime);
#pragma omp single
{
//printf("\nO resultado e: %d\n",global_max);
//printf("O tempo foi de: %.9f ms para a mmax2d\n", elapsedtime);
printf("%d %.9f\n",global_max,elapsedtime);
//printf("%.9f\n",elapsedtime);
}
}
free(cube);
return 0;
}
|
9eacc7a534f6b7deb6e689b800e5314f196aaf06.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn/fmha_fwd_launch_template.h>
namespace pytorch_fmha {
// Forward FMHA launcher specialised for head dimension 32: selects kernel
// traits from the key sequence length and dispatches through the fp16/bf16
// switch (FP16_SWITCH presumably binds 'elem_type' to the half/bf16 type
// matching is_bf16 -- see the switch's definition to confirm).
// NOTE(review): seqlen_k < 128 or in 129..255 matches neither branch and
// launches nothing -- confirm callers never pass such lengths.
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
// Tile configuration for exactly seqlen_k == 128.
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
// Larger tile for seqlen_k >= 256.
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
}; // namespace pytorch_fmha
| 9eacc7a534f6b7deb6e689b800e5314f196aaf06.cu | // Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
namespace pytorch_fmha {
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
}; // namespace pytorch_fmha
|
6d27eaa49f6d04527bca71fa7237383b920c792c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define LIST_SIZE 100000
extern "C" __device__ long long callCountList[LIST_SIZE];
// Currently a no-op placeholder for the start-of-kernel logging hook.
void bambooLogKernelBegin() {
}
// Currently a no-op placeholder for disabling trace recording.
void bambooLogRecordOff() {
}
// Copy the device-side call counters back to the host and dump every
// non-zero slot to "profile_call_prob_result.txt" as "index: count" lines.
void bambooLogKernelEnd() {
#ifdef KERNELTRACE
    hipDeviceSynchronize();
#endif
    // LIST_SIZE * sizeof(long long) is ~800 KB: allocate on the heap rather
    // than the stack, which the original 'long long resultArray[LIST_SIZE]'
    // risked overflowing.
    long long *resultArray = (long long *)calloc(LIST_SIZE, sizeof(long long));
    if (resultArray == NULL) {
        fprintf(stderr, "bambooLogKernelEnd: out of memory\n");
        return;
    }
    hipError_t err = hipMemcpyFromSymbol(resultArray, callCountList,
                                         LIST_SIZE * sizeof(long long), 0,
                                         hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        fprintf(stderr, "bambooLogKernelEnd: hipMemcpyFromSymbol failed: %s\n",
                hipGetErrorString(err));
        free(resultArray);
        return;
    }
    FILE *profileFile = fopen("profile_call_prob_result.txt", "w");
    if (profileFile == NULL) { // fprintf(NULL, ...) is undefined behavior
        fprintf(stderr, "bambooLogKernelEnd: cannot open profile_call_prob_result.txt\n");
        free(resultArray);
        return;
    }
    for (long long i = 0; i < LIST_SIZE; i++) {
        if (resultArray[i] != 0) {
            fprintf(profileFile, "%lld: %lld\n", i, resultArray[i]);
        }
    }
    fclose(profileFile);
    free(resultArray);
}
| 6d27eaa49f6d04527bca71fa7237383b920c792c.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#define LIST_SIZE 100000
extern "C" __device__ long long callCountList[LIST_SIZE];
void bambooLogKernelBegin() {
}
void bambooLogRecordOff() {
}
// Copy the device-side call counters back to the host and dump every
// non-zero slot to "profile_call_prob_result.txt" as "index: count" lines.
void bambooLogKernelEnd() {
#ifdef KERNELTRACE
    cudaDeviceSynchronize();
#endif
    // LIST_SIZE * sizeof(long long) is ~800 KB: allocate on the heap rather
    // than the stack, which the original 'long long resultArray[LIST_SIZE]'
    // risked overflowing.
    long long *resultArray = (long long *)calloc(LIST_SIZE, sizeof(long long));
    if (resultArray == NULL) {
        fprintf(stderr, "bambooLogKernelEnd: out of memory\n");
        return;
    }
    cudaError_t err = cudaMemcpyFromSymbol(resultArray, callCountList,
                                           LIST_SIZE * sizeof(long long), 0,
                                           cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "bambooLogKernelEnd: cudaMemcpyFromSymbol failed: %s\n",
                cudaGetErrorString(err));
        free(resultArray);
        return;
    }
    FILE *profileFile = fopen("profile_call_prob_result.txt", "w");
    if (profileFile == NULL) { // fprintf(NULL, ...) is undefined behavior
        fprintf(stderr, "bambooLogKernelEnd: cannot open profile_call_prob_result.txt\n");
        free(resultArray);
        return;
    }
    for (long long i = 0; i < LIST_SIZE; i++) {
        if (resultArray[i] != 0) {
            fprintf(profileFile, "%lld: %lld\n", i, resultArray[i]);
        }
    }
    fclose(profileFile);
    free(resultArray);
}
|
7ea3c72b0fd7cd5e7802fa1322061517074a80ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdlib>
// Mixin base class: any type derived from Unified has operator new/new[]
// that place the object in managed (unified) memory, so the same pointer is
// valid in both host and device code.
class Unified {
public:
// Scalar allocation in managed memory.
// NOTE(review): the hipMallocManaged status is ignored; on failure 'ptr'
// is returned uninitialized -- consider checking and throwing bad_alloc.
void *operator new(size_t len) {
void *ptr;
hipMallocManaged(&ptr, len);
return ptr;
}
void operator delete(void *ptr) {
hipFree(ptr);
}
// Array allocation in managed memory (same unchecked-status caveat).
void *operator new[] (std::size_t size) {
void *ptr;
hipMallocManaged(&ptr,size);
return ptr;
}
void operator delete[] (void* ptr) {
hipFree(ptr);
}
};
// Minimal message record stored in managed memory (via Unified), so a value
// written by the device-side setter is readable from host code after
// synchronization.
class publisher : public Unified
{
public:
float value; // payload last published into this topic slot
__device__ void setValue(float v) { value=v; }
};
/* GPU kernel: thread i writes (i + num) into topic[i].
   NOTE(review): there is no bounds guard on 'i', so the launch must supply
   exactly one thread per array element (main launches <<<1,n>>> for an
   n-element array) -- confirm before reusing with other configurations. */
__global__ void publish_msg(publisher *topic,float num) {
int i=threadIdx.x + blockIdx.x*blockDim.x;
topic[i].setValue(i+num);
}
/* Host function: print one topic entry on behalf of a subscriber */
/* Host-side "subscriber": print the value stored in topic[i], labelled with
   the subscriber id s. Assumes device writes were synchronized beforehand. */
__host__ void sub_msg(publisher *topic,int i, int s) {
std::cout<<"subscriber "<< s <<": Topic["<<i<<"] = "<<topic[i].value<<"\n";
}
// Demo driver: publish n values from a GPU kernel into a managed-memory
// topic array, read one entry back on the host, and report elapsed time.
int main(int argc,char *argv[])
{
    int t=0,n=20;
    int s=0;//subscriber number
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    // publisher derives from Unified, so new[] returns managed memory that
    // both the kernel below and the host-side reader can access.
    publisher *topic = new publisher[n];
    hipLaunchKernelGGL(( publish_msg), dim3(1),dim3(n), 0, 0, topic,0.1543); //n=20 is size of topic array
    hipDeviceSynchronize();
    s=1,t=0; //subscriber s and topic number t
    sub_msg(topic,t,s);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    std::cout<<"Elapsed time = "<<milliseconds<<" milliseconds\n";
    // Clean up: the original leaked the managed array and both events.
    delete[] topic;
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 7ea3c72b0fd7cd5e7802fa1322061517074a80ee.cu | #include <iostream>
#include <cuda.h>
#include <cstdlib>
class Unified {
public:
void *operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
return ptr;
}
void operator delete(void *ptr) {
cudaFree(ptr);
}
void *operator new[] (std::size_t size) {
void *ptr;
cudaMallocManaged(&ptr,size);
return ptr;
}
void operator delete[] (void* ptr) {
cudaFree(ptr);
}
};
class publisher : public Unified
{
public:
float value;
__device__ void setValue(float v) { value=v; }
};
__global__ void publish_msg(publisher *topic,float num) {
int i=threadIdx.x + blockIdx.x*blockDim.x;
topic[i].setValue(i+num);
}
/* Host function: print one topic entry on behalf of a subscriber */
__host__ void sub_msg(publisher *topic,int i, int s) {
std::cout<<"subscriber "<< s <<": Topic["<<i<<"] = "<<topic[i].value<<"\n";
}
// Demo driver: publish n values from a GPU kernel into a managed-memory
// topic array, read one entry back on the host, and report elapsed time.
int main(int argc,char *argv[])
{
    int t=0,n=20;
    int s=0;//subscriber number
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // publisher derives from Unified, so new[] returns managed memory that
    // both the kernel below and the host-side reader can access.
    publisher *topic = new publisher[n];
    publish_msg<<<1,n>>>(topic,0.1543); //n=20 is size of topic array
    cudaDeviceSynchronize();
    s=1,t=0; //subscriber s and topic number t
    sub_msg(topic,t,s);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout<<"Elapsed time = "<<milliseconds<<" milliseconds\n";
    // Clean up: the original leaked the managed array and both events.
    delete[] topic;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
251c493bdff1ad41d53e574c60fe9b124c39b4d7.hip | // !!! This is a file automatically generated by hipify!!!
// Jin Pyo Jeon
// Times
// N Thread/Block seconds
// 1 << 24 512 0.60
// 1 << 24 480 0.61
// 1 << 24 272 0.61
// 1 << 22 128 0.15
// 1 << 20 32 0.05
// 1 << 20 64 0.047
// 1 << 20 128 0.048
// 1 << 18 32 0.02
// 1 << 17 32 0.013
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#define MASK_WIDTH 5
// 1D convolution: each thread computes one output element P[i] as the dot
// product of the MASK_WIDTH-tap mask M with the window of N centred on i,
// treating out-of-range neighbours as zero (implicit zero padding).
// NOTE(review): 'i' is an int while Width is a long, so elements beyond
// INT_MAX could never be reached -- verify sizes stay below that bound.
__global__ void convolution_1D_basic_kernel(float *N, float *M, float *P, long Width) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float pValue = 0;
int nStartPoint = i - (MASK_WIDTH / 2); // left edge of the centred window
if (i < Width) { // guard: the grid may contain more threads than elements
for (int j = 0; j < MASK_WIDTH; j++) {
if (nStartPoint + j >= 0 && nStartPoint + j < Width) { // skip padding
pValue += N[nStartPoint + j] * M[j];
}
}
P[i] = pValue;
}
}
// Fill the height x width row-major matrix m with pseudo-random integer
// values in [0, 99] (stored as floats), drawn in linear element order.
void generateMat(float *m, size_t height, size_t width){
    size_t total = height * width;
    for (size_t k = 0; k < total; ++k) {
        // Same draw order as a row-by-row double loop.
        m[k] = rand() % 100;
    }
}
void printMat(float *m, size_t height, size_t width) {
int i, j;
for (i = 0; i < height; i++){
for (j = 0; j < width; j++) {
printf("%f ", m[i*width+j]);
}
printf("\n");
}
printf("\n");
}
int main(int argc, char**argv){
long width = 1 << 24;
int THREAD_COUNT = 17; // Due to seeming Grid Dim x limitation of 65536
srand(time(NULL));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float * m, *n, *p;
float * d_m, *d_p, *d_n;
long mSize = MASK_WIDTH * sizeof(float);
long nSize = width * sizeof(float);
long pSize = width * sizeof(float);
hipMalloc((void**)&d_m, mSize);
hipMalloc((void**)&d_n, nSize);
hipMalloc((void**)&d_p, pSize);
m = (float *)malloc(mSize);
n = (float *)malloc(nSize);
p = (float *)malloc(pSize);
for (int i = 0; i < MASK_WIDTH; i++) {
m[i] = 1.0/MASK_WIDTH; // averaging mask
}
generateMat(n, 1, width);
hipMemcpy(d_m, m, mSize, hipMemcpyHostToDevice);
hipMemcpy(d_n, n, nSize, hipMemcpyHostToDevice);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("%d: Error %d %s\n", __LINE__, err, hipGetErrorString(err));
exit(-1);
}
long blocks = ceil(width / (float) THREAD_COUNT);
while (blocks >= 65535) {
THREAD_COUNT *= 2;
blocks = ceil(width / (float) THREAD_COUNT);
}
assert(THREAD_COUNT <= 1024);
dim3 DimBlock(THREAD_COUNT, 1, 1);
dim3 DimGrid(blocks, 1, 1);
hipLaunchKernelGGL(( convolution_1D_basic_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, d_n, d_m, d_p, width);
err = hipGetLastError();
if (err != hipSuccess) {
printf("%d: Error %d %s\n", __LINE__, err, hipGetErrorString(err));
exit(-1);
}
hipMemcpy(p, d_p, pSize, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("The elapsed time is %f s with %d threads/block\n", elapsedTime / 1000.0, THREAD_COUNT);
free(n); free(m); free(p);
hipFree(d_n);
hipFree(d_m);
hipFree(d_p);
}
| 251c493bdff1ad41d53e574c60fe9b124c39b4d7.cu | // Jin Pyo Jeon
// Times
// N Thread/Block seconds
// 1 << 24 512 0.60
// 1 << 24 480 0.61
// 1 << 24 272 0.61
// 1 << 22 128 0.15
// 1 << 20 32 0.05
// 1 << 20 64 0.047
// 1 << 20 128 0.048
// 1 << 18 32 0.02
// 1 << 17 32 0.013
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#define MASK_WIDTH 5
__global__ void convolution_1D_basic_kernel(float *N, float *M, float *P, long Width) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float pValue = 0;
int nStartPoint = i - (MASK_WIDTH / 2);
if (i < Width) {
for (int j = 0; j < MASK_WIDTH; j++) {
if (nStartPoint + j >= 0 && nStartPoint + j < Width) {
pValue += N[nStartPoint + j] * M[j];
}
}
P[i] = pValue;
}
}
// Fill the height x width row-major matrix m with pseudo-random integer
// values in [0, 99] (stored as floats), in linear element order.
void generateMat(float *m, size_t height, size_t width){
    float *cursor = m;
    float *end = m + height * width;
    while (cursor != end) {
        *cursor++ = rand() % 100; // same sequence as the nested loops
    }
}
// Print the height x width row-major matrix m to stdout: one row per line,
// elements separated by a space, followed by a trailing blank line.
void printMat(float *m, size_t height, size_t width) {
    for (size_t r = 0; r < height; ++r) {
        const float *row = m + r * width;
        for (size_t c = 0; c < width; ++c) {
            printf("%f ", row[c]);
        }
        printf("\n");
    }
    printf("\n");
}
// Host driver: builds a width-element random signal, convolves it on the GPU
// with a MASK_WIDTH-tap averaging mask, and reports total wall time
// (allocations + copies + kernel) measured with CUDA events.
int main(int argc, char**argv){
long width = 1 << 24;
int THREAD_COUNT = 17; // Due to seeming Grid Dim x limitation of 65536
srand(time(NULL));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float * m, *n, *p;
float * d_m, *d_p, *d_n;
// Byte sizes: m is the mask, n the input signal, p the output.
long mSize = MASK_WIDTH * sizeof(float);
long nSize = width * sizeof(float);
long pSize = width * sizeof(float);
// NOTE(review): the cudaMalloc/malloc results are not checked here; errors
// only surface via the cudaGetLastError check after the copies below.
cudaMalloc((void**)&d_m, mSize);
cudaMalloc((void**)&d_n, nSize);
cudaMalloc((void**)&d_p, pSize);
m = (float *)malloc(mSize);
n = (float *)malloc(nSize);
p = (float *)malloc(pSize);
for (int i = 0; i < MASK_WIDTH; i++) {
m[i] = 1.0/MASK_WIDTH; // averaging mask
}
generateMat(n, 1, width); // random input signal in [0, 99]
cudaMemcpy(d_m, m, mSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_n, n, nSize, cudaMemcpyHostToDevice);
// Catch any error raised by the allocations/copies above.
cudaError err = cudaGetLastError();
if (err != cudaSuccess) {
printf("%d: Error %d %s\n", __LINE__, err, cudaGetErrorString(err));
exit(-1);
}
// Grow the block size until the grid stays below 65535 blocks in x
// (see the THREAD_COUNT comment above).
long blocks = ceil(width / (float) THREAD_COUNT);
while (blocks >= 65535) {
THREAD_COUNT *= 2;
blocks = ceil(width / (float) THREAD_COUNT);
}
assert(THREAD_COUNT <= 1024); // hardware cap on threads per block
dim3 DimBlock(THREAD_COUNT, 1, 1);
dim3 DimGrid(blocks, 1, 1);
convolution_1D_basic_kernel<<<DimGrid, DimBlock>>>(d_n, d_m, d_p, width);
err = cudaGetLastError(); // catches launch-configuration errors
if (err != cudaSuccess) {
printf("%d: Error %d %s\n", __LINE__, err, cudaGetErrorString(err));
exit(-1);
}
// Blocking copy: also synchronizes with the kernel before timing stops.
cudaMemcpy(p, d_p, pSize, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// cudaEventElapsedTime returns milliseconds; divide by 1000 for seconds.
printf("The elapsed time is %f s with %d threads/block\n", elapsedTime / 1000.0, THREAD_COUNT);
free(n); free(m); free(p);
cudaFree(d_n);
cudaFree(d_m);
cudaFree(d_p);
}
|
9796f7bb9acf3f99b5d9fdc85947c00a7dc81d70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY ACCESS
// srad kernel
__global__ void srad( fp d_lambda,
const int d_Nr,
const int d_Nc,
const long d_Ne,
const int *d_iN,
const int *d_iS,
const int *d_jE,
const int *d_jW,
fp *d_dN,
fp *d_dS,
fp *d_dE,
fp *d_dW,
const fp d_q0sqr,
fp *d_c,
const fp *d_I){
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!!
int row; // column, x position
int col; // row, y position
// variables
fp d_Jc;
fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
fp d_c_loc;
fp d_G2,d_L,d_num,d_den,d_qsqr;
// figure out row/col location in new matrix
row = (ei+1) % d_Nr - 1; // (0-n) row
col = (ei+1) / d_Nr + 1 - 1; // (0-n) column
if((ei+1) % d_Nr == 0){
row = d_Nr - 1;
col = col - 1;
}
if(ei<d_Ne){ // make sure that only threads matching jobs run
// directional derivatives, ICOV, diffusion coefficent
d_Jc = d_I[ei]; // get value of the current element
// directional derivates (every element of IMAGE)(try to copy to shared memory or temp files)
d_dN_loc = d_I[d_iN[row] + d_Nr*col] - d_Jc; // north direction derivative
d_dS_loc = d_I[d_iS[row] + d_Nr*col] - d_Jc; // south direction derivative
d_dW_loc = d_I[row + d_Nr*d_jW[col]] - d_Jc; // west direction derivative
d_dE_loc = d_I[row + d_Nr*d_jE[col]] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
d_G2 = (d_dN_loc*d_dN_loc + d_dS_loc*d_dS_loc + d_dW_loc*d_dW_loc + d_dE_loc*d_dE_loc) / (d_Jc*d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
d_L = (d_dN_loc + d_dS_loc + d_dW_loc + d_dE_loc) / d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
d_num = (0.5*d_G2) - ((1.0/16.0)*(d_L*d_L)) ; // num (based on gradient and laplacian)
d_den = 1 + (0.25*d_L); // den (based on laplacian)
d_qsqr = d_num/(d_den*d_den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
d_den = (d_qsqr-d_q0sqr) / (d_q0sqr * (1+d_q0sqr)) ; // den (based on qsqr and q0sqr)
d_c_loc = 1.0 / (1.0+d_den) ; // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (d_c_loc < 0){ // if diffusion coefficient < 0
d_c_loc = 0; // ... set to 0
}
else if (d_c_loc > 1){ // if diffusion coefficient > 1
d_c_loc = 1; // ... set to 1
}
// save data to global memory
d_dN[ei] = d_dN_loc;
d_dS[ei] = d_dS_loc;
d_dW[ei] = d_dW_loc;
d_dE[ei] = d_dE_loc;
d_c[ei] = d_c_loc;
}
}
| 9796f7bb9acf3f99b5d9fdc85947c00a7dc81d70.cu | // BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY ACCESS
// srad kernel
// SRAD (Speckle Reducing Anisotropic Diffusion) update step: for each of the
// d_Ne elements of the d_Nr x d_Nc image d_I (stride d_Nr between columns,
// i.e. column-major), compute the four directional derivatives and the
// ICOV-based diffusion coefficient clamped to [0,1], and store them to
// d_dN/d_dS/d_dW/d_dE and d_c. d_iN/d_iS/d_jE/d_jW are precomputed
// clamped-neighbour index tables.
// NOTE(review): the file header warns of a wrong memory access somewhere in
// this kernel; the row/col recovery from the linear index 'ei' below is the
// prime suspect -- verify against the column-major layout before reuse.
__global__ void srad( fp d_lambda,
const int d_Nr,
const int d_Nc,
const long d_Ne,
const int *d_iN,
const int *d_iS,
const int *d_jE,
const int *d_jW,
fp *d_dN,
fp *d_dS,
fp *d_dE,
fp *d_dW,
const fp d_q0sqr,
fp *d_c,
const fp *d_I){
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!!
int row; // row index (fast dimension, stride 1 in d_I)
int col; // column index (stride d_Nr in d_I)
// variables
fp d_Jc;
fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
fp d_c_loc;
fp d_G2,d_L,d_num,d_den,d_qsqr;
// figure out row/col location in new matrix
row = (ei+1) % d_Nr - 1; // (0-n) row
col = (ei+1) / d_Nr + 1 - 1; // (0-n) column
if((ei+1) % d_Nr == 0){ // last row of a column wraps to -1; fix it up
row = d_Nr - 1;
col = col - 1;
}
if(ei<d_Ne){ // make sure that only threads matching jobs run
// directional derivatives, ICOV, diffusion coefficent
d_Jc = d_I[ei]; // get value of the current element
// directional derivates (every element of IMAGE)(try to copy to shared memory or temp files)
d_dN_loc = d_I[d_iN[row] + d_Nr*col] - d_Jc; // north direction derivative
d_dS_loc = d_I[d_iS[row] + d_Nr*col] - d_Jc; // south direction derivative
d_dW_loc = d_I[row + d_Nr*d_jW[col]] - d_Jc; // west direction derivative
d_dE_loc = d_I[row + d_Nr*d_jE[col]] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
d_G2 = (d_dN_loc*d_dN_loc + d_dS_loc*d_dS_loc + d_dW_loc*d_dW_loc + d_dE_loc*d_dE_loc) / (d_Jc*d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
d_L = (d_dN_loc + d_dS_loc + d_dW_loc + d_dE_loc) / d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
d_num = (0.5*d_G2) - ((1.0/16.0)*(d_L*d_L)) ; // num (based on gradient and laplacian)
d_den = 1 + (0.25*d_L); // den (based on laplacian)
d_qsqr = d_num/(d_den*d_den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
d_den = (d_qsqr-d_q0sqr) / (d_q0sqr * (1+d_q0sqr)) ; // den (based on qsqr and q0sqr)
d_c_loc = 1.0 / (1.0+d_den) ; // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (d_c_loc < 0){ // if diffusion coefficient < 0
d_c_loc = 0; // ... set to 0
}
else if (d_c_loc > 1){ // if diffusion coefficient > 1
d_c_loc = 1; // ... set to 1
}
// save data to global memory
d_dN[ei] = d_dN_loc;
d_dS[ei] = d_dS_loc;
d_dW[ei] = d_dW_loc;
d_dE[ei] = d_dE_loc;
d_c[ei] = d_c_loc;
}
}
|
38a7b4258e13eb4d11e78da96a3e4c81f09fbe37.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <hip/hip_runtime.h>
//1024 * 1024
#define DATA_SIZE 1048576
int data[DATA_SIZE];
float clockRate = 1.0;
//0-9
void GenerateNumbers(int *number, int size)
{
for (int i = 0; i < size; i++) {
number[i] = rand() % 10;
}
}
//
void printDeviceProp(const hipDeviceProp_t &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %d.\n", (int)(prop.totalGlobalMem));
printf("sharedMemPerBlock : %d.\n", (int)prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %d.\n", (int)prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %d.\n", (int)prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %d.\n", (int)prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
//CUDA
bool InitCUDA()
{
int count;
//Cuda
hipGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
//
for (i = 0; i < count; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
//gpu
printDeviceProp(prop);
//
clockRate = prop.clockRate;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
// __global__ (GPU)
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
int sum = 0;
int i;
clock_t start = clock();
for (i = 0; i < DATA_SIZE; i++) {
sum += num[i] * num[i] * num[i];
}
*result = sum;
*time = clock() - start;
}
int main()
{
//CUDA
if (!InitCUDA()) {
return 0;
}
//
GenerateNumbers(data, DATA_SIZE);
/**/
int* gpudata, *result;
clock_t* time;
//hipMalloc ( resulttime )
hipMalloc((void**)&gpudata, sizeof(int)* DATA_SIZE);
hipMalloc((void**)&result, sizeof(int));
hipMalloc((void**)&time, sizeof(clock_t));
//hipMemcpy
//hipMemcpyHostToDevice -
//hipMemcpyDeviceToHost -
hipMemcpy(gpudata, data, sizeof(int)* DATA_SIZE, hipMemcpyHostToDevice);
// CUDA <<<block , thread , shared memory >>>(...);
sumOfSquares << <1, 1, 0 >> >(gpudata, result, time);
/**/
int sum;
clock_t time_used;
//hipMemcpy
hipMemcpy(&sum, result, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&time_used, time, sizeof(clock_t), hipMemcpyDeviceToHost);
//Free
hipFree(gpudata);
hipFree(result);
hipFree(time);
printf("GPUsum: %d time_clock: %ld time: %fs\n", sum, (long)(time_used*1.0), ((float)(time_used / (clockRate * 1000))));
sum = 0;
for (int i = 0; i < DATA_SIZE; i++) {
sum += data[i] * data[i] * data[i];
}
printf("CPUsum: %d \n", sum);
return 0;
}
//1M 4byte 4M
//4MB/1.026178 = 3.89MB
//14Gb
//
//1. 2. 3.SM6warp2block
//4. shared memory
| 38a7b4258e13eb4d11e78da96a3e4c81f09fbe37.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>
//1024 * 1024
#define DATA_SIZE 1048576
int data[DATA_SIZE];
float clockRate = 1.0;
//产生大量0-9之间的随机数
// Fill number[0..size-1] with pseudo-random digits in [0, 9].
void GenerateNumbers(int *number, int size)
{
    int *cursor = number;
    for (int remaining = size; remaining > 0; --remaining) {
        *cursor++ = rand() % 10; // same draw sequence as an indexed loop
    }
}
//打印设备属性
void printDeviceProp(const cudaDeviceProp &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %d.\n", (int)(prop.totalGlobalMem));
printf("sharedMemPerBlock : %d.\n", (int)prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %d.\n", (int)prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %d.\n", (int)prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %d.\n", (int)prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
//CUDA 初始化
// Pick the first CUDA device with compute capability >= 1.0, print each
// examined device's properties, record its clock rate in the global
// 'clockRate', and make it the current device.
// Returns true on success, false when no usable device exists.
bool InitCUDA()
{
    int count;
    // Number of CUDA-capable devices. The original ignored the call's
    // status, reading 'count' uninitialized on failure.
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        // Query once and check the status (the original queried each device
        // twice, discarding the first call's result).
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess) {
            continue;
        }
        printDeviceProp(prop);      // dump the device's properties
        clockRate = prop.clockRate; // clock in kHz, used for timing later
        if (prop.major >= 1) {
            break;
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(i);
    return true;
}
// __global__ 函数 (GPU上执行) 计算立方和
// GPU kernel (runs on the device): a single thread serially accumulates the
// sum of CUBES of num[0..DATA_SIZE-1] -- despite the name saying "squares",
// the code multiplies three factors -- and stores the elapsed device clock
// ticks in *time. main launches it as <<<1, 1>>>, so it is deliberately
// serial (a baseline for the timing demo).
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
int sum = 0;
int i;
clock_t start = clock(); // device-side clock, in GPU clock ticks
for (i = 0; i < DATA_SIZE; i++) {
sum += num[i] * num[i] * num[i];
}
*result = sum;
*time = clock() - start;
}
int main()
{
//CUDA 初始化
if (!InitCUDA()) {
return 0;
}
//生成随机数
GenerateNumbers(data, DATA_SIZE);
/*把数据复制到显卡内存中*/
int* gpudata, *result;
clock_t* time;
//cudaMalloc 取得一块显卡内存 ( 其中result用来存储计算结果,time用来存储运行时间 )
cudaMalloc((void**)&gpudata, sizeof(int)* DATA_SIZE);
cudaMalloc((void**)&result, sizeof(int));
cudaMalloc((void**)&time, sizeof(clock_t));
//cudaMemcpy 将产生的随机数复制到显卡内存中
//cudaMemcpyHostToDevice - 从内存复制到显卡内存
//cudaMemcpyDeviceToHost - 从显卡内存复制到内存
cudaMemcpy(gpudata, data, sizeof(int)* DATA_SIZE, cudaMemcpyHostToDevice);
// 在CUDA 中执行函数 语法:函数名称<<<block 数目, thread 数目, shared memory 大小>>>(参数...);
sumOfSquares << <1, 1, 0 >> >(gpudata, result, time);
/*把结果从显示芯片复制回主内存*/
int sum;
clock_t time_used;
//cudaMemcpy 将结果从显存中复制回内存
cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost);
//Free
cudaFree(gpudata);
cudaFree(result);
cudaFree(time);
printf("GPUsum: %d time_clock: %ld time: %fs\n", sum, (long)(time_used*1.0), ((float)(time_used / (clockRate * 1000))));
sum = 0;
for (int i = 0; i < DATA_SIZE; i++) {
sum += data[i] * data[i] * data[i];
}
printf("CPUsum: %d \n", sum);
return 0;
}
//1M 4byte数据 一共是的数据量就是4M
//4MB/1.026178 = 3.89MB
//我显卡的带宽是14Gb左右 也就是说 完全没有实现并行计算的威力
//优化:
//1.一定要先从显存带宽开始 2.确定任务中并行和串行的算法 3.需要两层线程并行的内核函数,每个SM上面至少有6个warp和2个block
//4.共享内存 shared memory
|
51847ae810244893fc6d0118e8f90f74663c2b68.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "atomicInc_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int numIterations = 1;
unsigned int numInputs = 1;
float *d_probabilities = NULL;
hipMalloc(&d_probabilities, XSIZE*YSIZE);
unsigned int *d_quantity = NULL;
hipMalloc(&d_quantity, XSIZE*YSIZE);
unsigned int *d_count = NULL;
hipMalloc(&d_count, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
atomicInc_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numIterations,numInputs,d_probabilities,d_quantity,d_count);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
atomicInc_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numIterations,numInputs,d_probabilities,d_quantity,d_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
atomicInc_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, numIterations,numInputs,d_probabilities,d_quantity,d_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 51847ae810244893fc6d0118e8f90f74663c2b68.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "atomicInc_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each matrix size (argv[1] rows of matrices_) and
// each of the 20 block shapes, time 1000 launches of atomicInc_kernel and
// print "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            unsigned int numIterations = 1;
            unsigned int numInputs = 1;
            // Allocate in BYTES: the original passed the element count
            // XSIZE*YSIZE to cudaMalloc, under-sizing every buffer by a
            // factor of sizeof(element).
            float *d_probabilities = NULL;
            cudaMalloc(&d_probabilities, XSIZE*YSIZE*sizeof(float));
            unsigned int *d_quantity = NULL;
            cudaMalloc(&d_quantity, XSIZE*YSIZE*sizeof(unsigned int));
            unsigned int *d_count = NULL;
            cudaMalloc(&d_count, XSIZE*YSIZE*sizeof(unsigned int));
            // Round the launch shape up to a multiple of the block size.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op free: forces CUDA context creation up front
            atomicInc_kernel<<<gridBlock,threadBlock>>>(numIterations,numInputs,d_probabilities,d_quantity,d_count);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                atomicInc_kernel<<<gridBlock,threadBlock>>>(numIterations,numInputs,d_probabilities,d_quantity,d_count);
            }
            // NOTE(review): there is no cudaDeviceSynchronize before 'end',
            // so this measures enqueue time of the async launches rather
            // than kernel execution time -- confirm that is intentional.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                atomicInc_kernel<<<gridBlock,threadBlock>>>(numIterations,numInputs,d_probabilities,d_quantity,d_count);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release this configuration's buffers (the original leaked all
            // three on every iteration of the inner loop).
            cudaFree(d_probabilities);
            cudaFree(d_quantity);
            cudaFree(d_count);
        }
    }
}
08c26ed5216a547f0d77619eef43da2b1647b05f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include <hip/hip_runtime_api.h>
#include "cusolverSp_LOWLEVEL_PREVIEW.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define INPUTDENSEA prhs[0]
#define INPUTSPARSEB prhs[1]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=2)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTDENSEA);
if ((mxIsChar(INPUTDENSEA))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(INPUTSPARSEB);
if ((mxIsChar(INPUTSPARSEB))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTSPARSEB)) {
mxGPUArray const *INPUTDENSEGPUA;
mxGPUArray const *INPUTSPARSEGPUB;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA);
INPUTSPARSEGPUB = mxGPUCreateFromMxArray(INPUTSPARSEB);
if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (mxGPUIsSparse(INPUTSPARSEGPUB)) ){
const mwSize *dimsGPUSA;
dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA);
int numARows, numAColumns;
numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */
const mwSize *dimsGPUSB;
dimsGPUSB=mxGPUGetDimensions(INPUTSPARSEGPUB);
int numBRows, numBColumns;
numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */
numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
hipDoubleComplex const *d_A_dense;
d_A_dense = (hipDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA));
mwIndex nnz2;
mxArray * VLSXY2 = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUB);
nnz2 = *(mxGetJc(VLSXY2) + numBColumns);
int nnzB= static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(VLSXY2 , pointerrowB, nnzB);
hipDoubleComplex *pointervalB = (hipDoubleComplex *)mxGetComplexDoubles(VLSXY2);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB=(int *)mxGPUGetData(row_sortBB);
gpuErrchk(hipMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), hipMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *xval_sortB=(hipDoubleComplex*)mxGPUGetData(val_sortBB);
gpuErrchk(hipMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), hipMemcpyHostToDevice));
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mxDestroyArray(row_sortB);
mxDestroyArray(VLSXY2);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *dB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_B_dense = (hipDoubleComplex *)mxGPUGetData(dB_dense);
cusparseSafeCall(cusparseZsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
HIPSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; // gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//hipDoubleComplex *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
//gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
cusparseSafeCall(hipsparseZnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// hipDoubleComplex *d_A; // gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; //gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; //gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(hipsparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
//gpuErrchk(hipFree(d_A_dense));
mxGPUDestroyGPUArray(PerVect);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
hipDoubleComplex *VALOUT = (hipDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(dB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(hipFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
hipsparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTSPARSEB))){
// if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be hipDoubleComplex precision.");
// }
if((!mxIsSparse(INPUTDENSEA))&& (mxIsSparse(INPUTSPARSEB)) ){
mxInitGPU();
const mwSize *dimsCPUA;
dimsCPUA=mxGetDimensions(INPUTDENSEA);
int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */
const mwSize *dimsCPUB;
dimsCPUB=mxGetDimensions(INPUTSPARSEB);
int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */
int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
hipDoubleComplex *h_A_dense1;
h_A_dense1 = (hipDoubleComplex *)mxGetComplexDoubles(INPUTDENSEA);
mwIndex nnz2;
nnz2 = *(mxGetJc(INPUTSPARSEB) + numBColumns);
int nnzB= static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(INPUTSPARSEB , pointerrowB, nnzB);
hipDoubleComplex *pointervalB = (hipDoubleComplex *)mxGetComplexDoubles(INPUTSPARSEB);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB=(int *)mxGPUGetData(row_sortBB);
gpuErrchk(hipMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), hipMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *xval_sortB=(hipDoubleComplex*)mxGPUGetData(val_sortBB);
gpuErrchk(hipMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), hipMemcpyHostToDevice));
mxDestroyArray(row_sortB);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *DB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_B_dense = (hipDoubleComplex *)mxGPUGetData(DB_dense);
cusparseSafeCall(cusparseZsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
HIPSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//hipDoubleComplex *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A_dense = (hipDoubleComplex *)mxGPUGetData(OUTMA);
gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
cusparseSafeCall(hipsparseZnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// hipDoubleComplex *d_A; // gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; // gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; // gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(hipsparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(OUTMA);
//gpuErrchk(hipFree(d_nnzPerVectorA));
mxGPUDestroyGPUArray(PerVect);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
hipDoubleComplex *VALOUT = (hipDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(DB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(hipFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
hipsparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
| 08c26ed5216a547f0d77619eef43da2b1647b05f.cu |
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include <cuda_runtime_api.h>
#include "cusolverSp_LOWLEVEL_PREVIEW.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define INPUTDENSEA prhs[0]
#define INPUTSPARSEB prhs[1]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=2)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTDENSEA);
if ((mxIsChar(INPUTDENSEA))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(INPUTSPARSEB);
if ((mxIsChar(INPUTSPARSEB))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTSPARSEB)) {
mxGPUArray const *INPUTDENSEGPUA;
mxGPUArray const *INPUTSPARSEGPUB;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA);
INPUTSPARSEGPUB = mxGPUCreateFromMxArray(INPUTSPARSEB);
if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (mxGPUIsSparse(INPUTSPARSEGPUB)) ){
const mwSize *dimsGPUSA;
dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA);
int numARows, numAColumns;
numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */
const mwSize *dimsGPUSB;
dimsGPUSB=mxGPUGetDimensions(INPUTSPARSEGPUB);
int numBRows, numBColumns;
numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */
numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
cuDoubleComplex const *d_A_dense;
d_A_dense = (cuDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA));
mwIndex nnz2;
mxArray * VLSXY2 = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUB);
nnz2 = *(mxGetJc(VLSXY2) + numBColumns);
int nnzB= static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(VLSXY2 , pointerrowB, nnzB);
cuDoubleComplex *pointervalB = (cuDoubleComplex *)mxGetComplexDoubles(VLSXY2);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB=(int *)mxGPUGetData(row_sortBB);
gpuErrchk(cudaMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), cudaMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *xval_sortB=(cuDoubleComplex*)mxGPUGetData(val_sortBB);
gpuErrchk(cudaMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), cudaMemcpyHostToDevice));
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mxDestroyArray(row_sortB);
mxDestroyArray(VLSXY2);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *dB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_B_dense = (cuDoubleComplex *)mxGPUGetData(dB_dense);
cusparseSafeCall(cusparseZsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
CUSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; // gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//cuDoubleComplex *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
//gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
cusparseSafeCall(cusparseZnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// cuDoubleComplex *d_A; // gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; //gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; //gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(cusparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
//gpuErrchk(cudaFree(d_A_dense));
mxGPUDestroyGPUArray(PerVect);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
cuDoubleComplex *VALOUT = (cuDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(dB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(cudaFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
cusparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTSPARSEB))){
// if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be cuDoubleComplex precision.");
// }
if((!mxIsSparse(INPUTDENSEA))&& (mxIsSparse(INPUTSPARSEB)) ){
mxInitGPU();
const mwSize *dimsCPUA;
dimsCPUA=mxGetDimensions(INPUTDENSEA);
int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */
const mwSize *dimsCPUB;
dimsCPUB=mxGetDimensions(INPUTSPARSEB);
int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */
int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
cuDoubleComplex *h_A_dense1;
h_A_dense1 = (cuDoubleComplex *)mxGetComplexDoubles(INPUTDENSEA);
mwIndex nnz2;
nnz2 = *(mxGetJc(INPUTSPARSEB) + numBColumns);
int nnzB= static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(INPUTSPARSEB , pointerrowB, nnzB);
cuDoubleComplex *pointervalB = (cuDoubleComplex *)mxGetComplexDoubles(INPUTSPARSEB);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB=(int *)mxGPUGetData(row_sortBB);
gpuErrchk(cudaMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), cudaMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *xval_sortB=(cuDoubleComplex*)mxGPUGetData(val_sortBB);
gpuErrchk(cudaMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), cudaMemcpyHostToDevice));
mxDestroyArray(row_sortB);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *DB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_B_dense = (cuDoubleComplex *)mxGPUGetData(DB_dense);
cusparseSafeCall(cusparseZsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
CUSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//cuDoubleComplex *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A_dense = (cuDoubleComplex *)mxGPUGetData(OUTMA);
gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
cusparseSafeCall(cusparseZnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// cuDoubleComplex *d_A; // gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; // gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; // gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(cusparseZdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(OUTMA);
//gpuErrchk(cudaFree(d_nnzPerVectorA));
mxGPUDestroyGPUArray(PerVect);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpZcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpZcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpZcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_INITIALIZE_VALUES);
cuDoubleComplex *VALOUT = (cuDoubleComplex *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpZcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(DB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(cudaFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
cusparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
|
6f596a8630be1caf2a797fb43cf3a2d404026872.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./kern.cuh"
#include "kern_helper_hip.cuh"
#include "hip/hip_fp16.h"
#include "src/cuda/cub/util_ptx.cuh"
#include "src/cuda/fp16_help.cuh"
const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4;
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
/*!
* \brief compute grad w.r.t. filter
*
* block dim: out_id * kern_id
* threads with the same out_id computes grad for corresponding kernel element
* \tparam nr_thpf number of threads for one element in the filter; must be
* power of 2;
*/
// Backward-filter kernel for channelwise convolution, generic/float path.
// One filter element (ic, chl_mul, fh, fw) is computed cooperatively by
// nr_thpf consecutive threads (nr_thpf must be a power of 2); the group
// partitions the contributing output positions, accumulates partial sums,
// and tree-reduces them in dynamic shared memory (blockDim.x * sizeof(T)
// bytes of the launch's shared allocation are used when nr_thpf > 1).
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_float(
        T* flt_grad, const T* src, const T* dst_grad, Param param) {
    // OUT_IDX is the flattened filter-element index handled by this group.
    const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
                   IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
                   FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
                   SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
                   OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
                   DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
                   BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf,
                   OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X;
    uint32_t ic, chl_mul, fh, fw;
    {
        // Decompose OUT_IDX into filter coordinates; fw varies fastest.
        uint32_t i = OUT_IDX;
        i = div_mod(i, FW, fw);
        i = div_mod(i, FH, fh);
        i = div_mod(i, CHL_MUL, chl_mul);
        ic = i;
    }
    if (ic >= IC) {
        return;  // group is past the last filter element
    }
    // Point src/dst_grad at this (input channel, output channel) plane.
    src += ic * IH * IW;
    dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
    // Output-coordinate window [oh_lo, oh_hi) x [ow_lo, ow_hi) whose input
    // taps (for this fh, fw) fall inside the padded image.
    const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
                   oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
                   ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
                   ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo,
                   oblk_w = ow_hi - ow_lo,
                   oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL),
                   tid = threadIdx.x % nr_thpf;
    if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) {
        // Empty window: this filter element receives no gradient.
        if (!tid)
            flt_grad[OUT_IDX] = 0;
        return;
    }
    T sum(0);
    // Each of the nr_thpf threads strides over the (oh, ow, batch-group) work
    // items; batches are unrolled BATCH_UNROLL at a time.
    for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
        uint32_t n, oh, ow;
        n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
        oh += oh_lo;
        ow += ow_lo;
        uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
                 soff = ih * IW + iw + n * SRC_BATCH_STRIDE,
                 doff = oh * OW + ow + n * DST_BATCH_STRIDE;
#pragma unroll
        for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
            if (!i || n + i < N) {
                sum += src[soff] * dst_grad[doff];
            }
            soff += SRC_BATCH_STRIDE;
            doff += DST_BATCH_STRIDE;
        }
    }
    if (nr_thpf == 1) {
        flt_grad[OUT_IDX] = sum;
    } else {
        // reduce all sums in a block
        extern __shared__ uint8_t shared_storage[];
        volatile T* thread_sum = reinterpret_cast<T*>(shared_storage);
        thread_sum += THREADID_X * nr_thpf;
        thread_sum[tid] = sum;
#pragma unroll
        for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
            bool cond = nr_thpf >= i * 2 && tid < i;
            // Block barrier while the reduction spans warps; within a warp a
            // lighter warp sync suffices.
            if (i >= WARP_SIZE) {
                __syncthreads();
            } else {
                cub::WARP_SYNC(0xffffffff);
            }
            if (cond) {
                T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i];
                thread_sum[tid] = v1;
            }
        }
        if (!tid) {
            flt_grad[OUT_IDX] = thread_sum[0];
        }
    }
}
#if TORCH_HIP_VERSION >= 9000
// Backward-filter kernel, fp16 path: each thread group produces TWO adjacent
// filter elements (OUT_IDX, OUT_IDX + 1) so the partial sums can be kept in a
// __half2 and reduced with packed fma2.  The fast path requires both elements
// to lie on the same filter row (OUT_IDX % FW != FW - 1); otherwise the two
// elements are handled one at a time in the scalar fallback below.
// Requires dynamic shared memory when nr_thpf > 1 (see run_bwd_filter).
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_hf(
        __half* flt_grad, const __half* src, const __half* dst_grad, Param param) {
    const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
                   IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
                   FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
                   SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
                   OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
                   DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
                   BLKDIM_X = (blockDim.x / nr_thpf) * 2,
                   THREADID_X = (threadIdx.x / nr_thpf) * 2,
                   OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X,
                   LAST_IDX = FH * FW * CHL_MUL * IC, tid = threadIdx.x % nr_thpf;
    __half2 sum2{0.0, 0.0};
    if (OUT_IDX % FW != FW - 1) {
        // Fast path: (fw, fw+1) share ic/chl_mul/fh, so both taps can be
        // accumulated together as a __half2.
        uint32_t ic, chl_mul, fh, fw;
        {
            uint32_t i = OUT_IDX;
            i = div_mod(i, FW, fw);
            i = div_mod(i, FH, fh);
            i = div_mod(i, CHL_MUL, chl_mul);
            ic = i;
        }
        if (ic >= IC) {
            return;
        }
        src += ic * IH * IW;
        dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
        // Separate valid output-column windows for the x (fw) and y (fw + 1)
        // filter elements; rows share one window.
        const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
                       oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
                       ow_lox = max(int32_t(PW - fw + SW - 1), 0) / SW,
                       ow_loy = max(int32_t(PW - fw + SW - 2), 0) / SW,
                       ow_hix = min((IW - 1 + PW - fw) / SW + 1, OW),
                       ow_hiy = min((IW - 2 + PW - fw) / SW + 1, OW),
                       oblk_h = oh_hi - oh_lo, oblk_wx = ow_hix - ow_lox,
                       oblk_wy = ow_hiy - ow_loy;
        if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1) {
            // Neither element receives any gradient.
            if (!tid) {
                flt_grad[OUT_IDX] = 0;
                flt_grad[OUT_IDX + 1] = 0;
            }
            return;
        }
        if (ow_lox >= ow_hix) {
            if (!tid)
                flt_grad[OUT_IDX] = 0;
        }
        if (IW + PW < fw + 2 || ow_loy >= ow_hiy) {
            if (!tid)
                flt_grad[OUT_IDX + 1] = 0;
            if (ow_lox >= ow_hix)
                return;  // both windows empty — nothing left to accumulate
        }
        sum2.x = 0.0;
        sum2.y = 0.0;
        __half2 src2{0.0, 0.0};
        __half2 dst2{0.0, 0.0};
        // Iterate over the union of both column windows; per-lane predicates
        // below mask out positions outside each element's own window.
        const uint32_t oblk_w = max(ow_hix, ow_hiy) - min(ow_lox, ow_loy),
                       oblk_tot = oblk_h * oblk_w *
                                  ((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
        for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
            uint32_t n_x, n_y, oh, ow_x, ow_y;
            n_x = div_mod(div_mod(oblk_idx, oblk_wx, ow_x), oblk_h, oh) * BATCH_UNROLL;
            n_y = div_mod(div_mod(oblk_idx, oblk_wy, ow_y), oblk_h, oh) * BATCH_UNROLL;
            oh += oh_lo;
            ow_x += ow_lox;
            ow_y += ow_loy;
            uint32_t ih = oh * SH - PH + fh, iw_x = ow_x * SW - PW + fw,
                     iw_y = ow_y * SW - PW + fw + 1,
                     soff_x = ih * IW + iw_x + n_x * SRC_BATCH_STRIDE,
                     soff_y = ih * IW + iw_y + n_y * SRC_BATCH_STRIDE,
                     doff_x = oh * OW + ow_x + n_x * DST_BATCH_STRIDE,
                     doff_y = oh * OW + ow_y + n_y * DST_BATCH_STRIDE;
#pragma unroll
            for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
                if (!i || n_x + i < N || n_y + i < N) {
                    // Zero the lanes first so out-of-window taps contribute 0.
                    src2.x = 0.0;
                    src2.y = 0.0;
                    dst2.x = 0.0;
                    dst2.y = 0.0;
                    if (n_x + i < N && ow_x < ow_hix) {
                        src2.x = src[soff_x];
                        dst2.x = dst_grad[doff_x];
                    }
                    if (n_y + i < N && ow_y < ow_hiy) {
                        src2.y = src[soff_y];
                        dst2.y = dst_grad[doff_y];
                    }
                    sum2 = fma2(src2, dst2, sum2);
                }
                soff_x += SRC_BATCH_STRIDE;
                soff_y += SRC_BATCH_STRIDE;
                doff_x += DST_BATCH_STRIDE;
                doff_y += DST_BATCH_STRIDE;
            }
        }
    } else {
        // Fallback: the pair straddles a filter-row boundary, so each of the
        // two elements is resolved independently with scalar __half math.
        for (size_t offset = 0; offset < 2; ++offset) {
            uint32_t ic, chl_mul, fh, fw;
            {
                uint32_t i = OUT_IDX + offset;
                i = div_mod(i, FW, fw);
                i = div_mod(i, FH, fh);
                i = div_mod(i, CHL_MUL, chl_mul);
                ic = i;
            }
            if (ic >= IC) {
                if (offset == 0)
                    return;
                else
                    break;  // first element valid, second past the end
            }
            const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
                           oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
                           ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
                           ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW),
                           oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo,
                           oblk_tot = oblk_h * oblk_w *
                                      ((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
            if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 ||
                ow_lo >= ow_hi) {
                if (!tid)
                    flt_grad[OUT_IDX + offset] = 0;
                continue;
            }
            __half sum(0.0);
            for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
                uint32_t n, oh, ow;
                n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
                oh += oh_lo;
                ow += ow_lo;
                uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
                         soff = ic * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE,
                         doff = (ic * CHL_MUL + chl_mul) * OH * OW + oh * OW + ow +
                                n * DST_BATCH_STRIDE;
#pragma unroll
                for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
                    if (!i || n + i < N) {
                        sum = fma(src[soff], dst_grad[doff], sum);
                    }
                    soff += SRC_BATCH_STRIDE;
                    doff += DST_BATCH_STRIDE;
                }
            }
            if (!offset)
                sum2.x = sum;
            if (offset)
                sum2.y = sum;
        }
    }
    if (nr_thpf == 1) {
        flt_grad[OUT_IDX] = sum2.x;
        // NOTE(review): guard compares against LAST_IDX (== total element
        // count) rather than LAST_IDX - 1 — verify OUT_IDX + 1 cannot land
        // exactly on LAST_IDX, which would write one element out of bounds.
        if (OUT_IDX != LAST_IDX)
            flt_grad[OUT_IDX + 1] = sum2.y;
    } else {
        // Packed __half2 tree reduction, mirroring the float kernel.
        extern __shared__ uint8_t shared_storage[];
        __half2* thread_sum = reinterpret_cast<__half2*>(shared_storage);
        thread_sum += THREADID_X * nr_thpf / 2;
        thread_sum[tid] = sum2;
#pragma unroll
        for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
            bool cond = nr_thpf >= i * 2 && tid < i;
            if (i >= WARP_SIZE) {
                __syncthreads();
            } else {
                cub::WARP_SYNC(0xffffffff);
            }
            if (cond) {
                // fma2 with a multiplier of {1, 1} is a packed half2 add.
                __half2 one = {1.0, 1.0};
                __half2 v0 = thread_sum[tid], v1 = fma2(v0, one, thread_sum[tid + i]);
                thread_sum[tid] = v1;
            }
        }
        if (!tid) {
            flt_grad[OUT_IDX] = thread_sum[0].x;
            if (OUT_IDX != LAST_IDX)
                flt_grad[OUT_IDX + 1] = thread_sum[0].y;
        }
    }
}
#endif
// Map a runtime power-of-2 threads-per-filter-element count (_p, 1..1024) to
// the matching statically instantiated kernel template func<type, _p> and
// return it wrapped in a FixFunction.  Expanded inside the get_kern<>()
// specializations below; _p must be exactly a power of two or we assert.
#define GET_KERN(func, type)                                  \
    FixFunction<type> f_struct;                               \
    switch (_p) {                                             \
        case 1 << 10:                                         \
            f_struct.f = func<type, 1 << 10>;                 \
            break;                                            \
        case 1 << 9:                                          \
            f_struct.f = func<type, 1 << 9>;                  \
            break;                                            \
        case 1 << 8:                                          \
            f_struct.f = func<type, 1 << 8>;                  \
            break;                                            \
        case 1 << 7:                                          \
            f_struct.f = func<type, 1 << 7>;                  \
            break;                                            \
        case 1 << 6:                                          \
            f_struct.f = func<type, 1 << 6>;                  \
            break;                                            \
        case 1 << 5:                                          \
            f_struct.f = func<type, 1 << 5>;                  \
            break;                                            \
        case 1 << 4:                                          \
            f_struct.f = func<type, 1 << 4>;                  \
            break;                                            \
        case 1 << 3:                                          \
            f_struct.f = func<type, 1 << 3>;                  \
            break;                                            \
        case 1 << 2:                                          \
            f_struct.f = func<type, 1 << 2>;                  \
            break;                                            \
        case 1 << 1:                                          \
            f_struct.f = func<type, 1 << 1>;                  \
            break;                                            \
        case 1 << 0:                                          \
            f_struct.f = func<type, 1 << 0>;                  \
            break;                                            \
        default:                                              \
            megdnn_assert(false, "DO NOT IMP CASE FUNCTION!!"); \
    }                                                         \
    return f_struct;
// Thin wrapper so a kernel function pointer can be returned from the
// GET_KERN macro expansion.
template <typename T>
struct FixFunction {
    void (*f)(T*, const T*, const T*, Param);
};
// Select the kernel instantiation for element type T and a power-of-2
// threads-per-filter-element count _p.
template <typename T>
FixFunction<T> get_kern(const uint32_t& _p);
template <>
FixFunction<float> get_kern<float>(const uint32_t& _p) {
    GET_KERN(kern_bwd_filter_float, float);
}
#if TORCH_HIP_VERSION >= 9000
// Native __half path (needs half2 arithmetic, available from CUDA 9 /
// equivalent HIP toolchains).
template <>
FixFunction<__half> get_kern<__half>(const uint32_t& _p) {
    GET_KERN(kern_bwd_filter_hf, __half);
}
#endif
// dt_float16 falls back to the generic kernel, which performs the arithmetic
// through dt_float16's operator overloads.
template <>
FixFunction<dt_float16> get_kern<dt_float16>(const uint32_t& _p) {
    GET_KERN(kern_bwd_filter_float, dt_float16);
}
#undef GET_KERN
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
// Host entry point: choose a power-of-2 threads-per-filter-element count
// (nr_thpf) from the amount of reduction work per element, pick the matching
// kernel instantiation, size the grid so every filter element is covered, and
// launch asynchronously on `stream`.
template <typename T>
void run_bwd_filter(
        T* filter_grad, const T* src, const T* dst_grad, const Param& param,
        hipStream_t stream) {
    void (*kern)(T*, const T*, const T*, Param) = NULL;
    // Heuristic: about BATCH_UNROLL*16 reduction items per thread, capped by
    // the device block-size limit for this kernel.
    uint32_t nr_thread = query_blocksize_for_kernel(get_kern<T>(1024).f),
             nr_thpf = ::min(
                     nr_thread, std::max<uint32_t>(
                                        1, param.out_h * param.out_w * param.batch /
                                                   (BATCH_UNROLL * 16)));
    // find nearest power-of-2 of nr_thpf
    do {
#define CK(_n)                         \
    if (nr_thpf >= _n) {               \
        kern = get_kern<T>(_n).f;      \
        nr_thpf = _n;                  \
        break;                         \
    }
        CK(1 << 10);
        CK(1 << 9);
        CK(1 << 8);
        CK(1 << 7);
        CK(1 << 6);
        CK(1 << 5);
        CK(1 << 4);
        CK(1 << 3);
        CK(1 << 2);
        CK(1 << 1);
        CK(1 << 0);
#undef CK
    } while (0);
    megdnn_assert(kern);
    nr_thread = query_blocksize_for_kernel(kern);
    // Shrink the block until it is a whole number of warps, keeping the
    // kernels' warp-synchronous reduction valid.
    uint32_t nr_flt_per_blk = nr_thread / nr_thpf;
    while (nr_flt_per_blk * nr_thpf % WARP_SIZE)
        --nr_flt_per_blk;
    megdnn_assert(nr_flt_per_blk);
    int nr_block = DIVUP(
            param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk);
    nr_thread = nr_flt_per_blk * nr_thpf;
    // 2x sizing presumably covers the fp16 kernel's __half2 partials (two
    // outputs per group) — TODO confirm against kern_bwd_filter_hf.
    uint32_t shared = nr_thread * 2 * sizeof(T);
    hipLaunchKernelGGL(( kern), dim3(nr_block), dim3(nr_thread), shared, stream, filter_grad, src, dst_grad, param);
    after_kernel_launch();
}
// Explicit instantiations for the supported element types.
template void run_bwd_filter(
        float*, const float*, const float*, const Param&, hipStream_t);
#if TORCH_HIP_VERSION >= 9000
template void run_bwd_filter(
        __half*, const __half*, const __half*, const Param&, hipStream_t);
#endif
template void run_bwd_filter(
        dt_float16*, const dt_float16*, const dt_float16*, const Param&, hipStream_t);
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| 6f596a8630be1caf2a797fb43cf3a2d404026872.cu | #include "./kern.cuh"
#include "./kern_helper.cuh"
#include "cuda_fp16.h"
#include "src/cuda/cub/util_ptx.cuh"
#include "src/cuda/fp16_help.cuh"
const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4;
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
/*!
* \brief compute grad w.r.t. filter
*
* block dim: out_id * kern_id
* threads with the same out_id computes grad for corresponding kernel element
* \tparam nr_thpf number of threads for one element in the filter; must be
* power of 2;
*/
// Backward-filter kernel for channelwise convolution, generic/float path.
// One filter element (ic, chl_mul, fh, fw) is computed cooperatively by
// nr_thpf consecutive threads (nr_thpf must be a power of 2); the group
// partitions the contributing output positions, accumulates partial sums,
// and tree-reduces them in dynamic shared memory (blockDim.x * sizeof(T)
// bytes of the launch's shared allocation are used when nr_thpf > 1).
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_float(
        T* flt_grad, const T* src, const T* dst_grad, Param param) {
    // OUT_IDX is the flattened filter-element index handled by this group.
    const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
                   IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
                   FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
                   SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
                   OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
                   DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
                   BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf,
                   OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X;
    uint32_t ic, chl_mul, fh, fw;
    {
        // Decompose OUT_IDX into filter coordinates; fw varies fastest.
        uint32_t i = OUT_IDX;
        i = div_mod(i, FW, fw);
        i = div_mod(i, FH, fh);
        i = div_mod(i, CHL_MUL, chl_mul);
        ic = i;
    }
    if (ic >= IC) {
        return;  // group is past the last filter element
    }
    // Point src/dst_grad at this (input channel, output channel) plane.
    src += ic * IH * IW;
    dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
    // Output-coordinate window [oh_lo, oh_hi) x [ow_lo, ow_hi) whose input
    // taps (for this fh, fw) fall inside the padded image.
    const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
                   oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
                   ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
                   ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo,
                   oblk_w = ow_hi - ow_lo,
                   oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL),
                   tid = threadIdx.x % nr_thpf;
    if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) {
        // Empty window: this filter element receives no gradient.
        if (!tid)
            flt_grad[OUT_IDX] = 0;
        return;
    }
    T sum(0);
    // Each of the nr_thpf threads strides over the (oh, ow, batch-group) work
    // items; batches are unrolled BATCH_UNROLL at a time.
    for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
        uint32_t n, oh, ow;
        n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
        oh += oh_lo;
        ow += ow_lo;
        uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
                 soff = ih * IW + iw + n * SRC_BATCH_STRIDE,
                 doff = oh * OW + ow + n * DST_BATCH_STRIDE;
#pragma unroll
        for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
            if (!i || n + i < N) {
                sum += src[soff] * dst_grad[doff];
            }
            soff += SRC_BATCH_STRIDE;
            doff += DST_BATCH_STRIDE;
        }
    }
    if (nr_thpf == 1) {
        flt_grad[OUT_IDX] = sum;
    } else {
        // reduce all sums in a block
        extern __shared__ uint8_t shared_storage[];
        volatile T* thread_sum = reinterpret_cast<T*>(shared_storage);
        thread_sum += THREADID_X * nr_thpf;
        thread_sum[tid] = sum;
#pragma unroll
        for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
            bool cond = nr_thpf >= i * 2 && tid < i;
            // Block barrier while the reduction spans warps; within a warp a
            // lighter warp sync suffices.
            if (i >= WARP_SIZE) {
                __syncthreads();
            } else {
                cub::WARP_SYNC(0xffffffff);
            }
            if (cond) {
                T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i];
                thread_sum[tid] = v1;
            }
        }
        if (!tid) {
            flt_grad[OUT_IDX] = thread_sum[0];
        }
    }
}
#if CUDA_VERSION >= 9000
// Backward-filter kernel, fp16 path: each thread group produces TWO adjacent
// filter elements (OUT_IDX, OUT_IDX + 1) so the partial sums can be kept in a
// __half2 and reduced with packed fma2.  The fast path requires both elements
// to lie on the same filter row (OUT_IDX % FW != FW - 1); otherwise the two
// elements are handled one at a time in the scalar fallback below.
// Requires dynamic shared memory when nr_thpf > 1 (see run_bwd_filter).
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_hf(
        __half* flt_grad, const __half* src, const __half* dst_grad, Param param) {
    const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
                   IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
                   FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
                   SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
                   OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
                   DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
                   BLKDIM_X = (blockDim.x / nr_thpf) * 2,
                   THREADID_X = (threadIdx.x / nr_thpf) * 2,
                   OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X,
                   LAST_IDX = FH * FW * CHL_MUL * IC, tid = threadIdx.x % nr_thpf;
    __half2 sum2{0.0, 0.0};
    if (OUT_IDX % FW != FW - 1) {
        // Fast path: (fw, fw+1) share ic/chl_mul/fh, so both taps can be
        // accumulated together as a __half2.
        uint32_t ic, chl_mul, fh, fw;
        {
            uint32_t i = OUT_IDX;
            i = div_mod(i, FW, fw);
            i = div_mod(i, FH, fh);
            i = div_mod(i, CHL_MUL, chl_mul);
            ic = i;
        }
        if (ic >= IC) {
            return;
        }
        src += ic * IH * IW;
        dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
        // Separate valid output-column windows for the x (fw) and y (fw + 1)
        // filter elements; rows share one window.
        const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
                       oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
                       ow_lox = max(int32_t(PW - fw + SW - 1), 0) / SW,
                       ow_loy = max(int32_t(PW - fw + SW - 2), 0) / SW,
                       ow_hix = min((IW - 1 + PW - fw) / SW + 1, OW),
                       ow_hiy = min((IW - 2 + PW - fw) / SW + 1, OW),
                       oblk_h = oh_hi - oh_lo, oblk_wx = ow_hix - ow_lox,
                       oblk_wy = ow_hiy - ow_loy;
        if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1) {
            // Neither element receives any gradient.
            if (!tid) {
                flt_grad[OUT_IDX] = 0;
                flt_grad[OUT_IDX + 1] = 0;
            }
            return;
        }
        if (ow_lox >= ow_hix) {
            if (!tid)
                flt_grad[OUT_IDX] = 0;
        }
        if (IW + PW < fw + 2 || ow_loy >= ow_hiy) {
            if (!tid)
                flt_grad[OUT_IDX + 1] = 0;
            if (ow_lox >= ow_hix)
                return;  // both windows empty — nothing left to accumulate
        }
        sum2.x = 0.0;
        sum2.y = 0.0;
        __half2 src2{0.0, 0.0};
        __half2 dst2{0.0, 0.0};
        // Iterate over the union of both column windows; per-lane predicates
        // below mask out positions outside each element's own window.
        const uint32_t oblk_w = max(ow_hix, ow_hiy) - min(ow_lox, ow_loy),
                       oblk_tot = oblk_h * oblk_w *
                                  ((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
        for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
            uint32_t n_x, n_y, oh, ow_x, ow_y;
            n_x = div_mod(div_mod(oblk_idx, oblk_wx, ow_x), oblk_h, oh) * BATCH_UNROLL;
            n_y = div_mod(div_mod(oblk_idx, oblk_wy, ow_y), oblk_h, oh) * BATCH_UNROLL;
            oh += oh_lo;
            ow_x += ow_lox;
            ow_y += ow_loy;
            uint32_t ih = oh * SH - PH + fh, iw_x = ow_x * SW - PW + fw,
                     iw_y = ow_y * SW - PW + fw + 1,
                     soff_x = ih * IW + iw_x + n_x * SRC_BATCH_STRIDE,
                     soff_y = ih * IW + iw_y + n_y * SRC_BATCH_STRIDE,
                     doff_x = oh * OW + ow_x + n_x * DST_BATCH_STRIDE,
                     doff_y = oh * OW + ow_y + n_y * DST_BATCH_STRIDE;
#pragma unroll
            for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
                if (!i || n_x + i < N || n_y + i < N) {
                    // Zero the lanes first so out-of-window taps contribute 0.
                    src2.x = 0.0;
                    src2.y = 0.0;
                    dst2.x = 0.0;
                    dst2.y = 0.0;
                    if (n_x + i < N && ow_x < ow_hix) {
                        src2.x = src[soff_x];
                        dst2.x = dst_grad[doff_x];
                    }
                    if (n_y + i < N && ow_y < ow_hiy) {
                        src2.y = src[soff_y];
                        dst2.y = dst_grad[doff_y];
                    }
                    sum2 = fma2(src2, dst2, sum2);
                }
                soff_x += SRC_BATCH_STRIDE;
                soff_y += SRC_BATCH_STRIDE;
                doff_x += DST_BATCH_STRIDE;
                doff_y += DST_BATCH_STRIDE;
            }
        }
    } else {
        // Fallback: the pair straddles a filter-row boundary, so each of the
        // two elements is resolved independently with scalar __half math.
        for (size_t offset = 0; offset < 2; ++offset) {
            uint32_t ic, chl_mul, fh, fw;
            {
                uint32_t i = OUT_IDX + offset;
                i = div_mod(i, FW, fw);
                i = div_mod(i, FH, fh);
                i = div_mod(i, CHL_MUL, chl_mul);
                ic = i;
            }
            if (ic >= IC) {
                if (offset == 0)
                    return;
                else
                    break;  // first element valid, second past the end
            }
            const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
                           oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
                           ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
                           ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW),
                           oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo,
                           oblk_tot = oblk_h * oblk_w *
                                      ((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
            if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 ||
                ow_lo >= ow_hi) {
                if (!tid)
                    flt_grad[OUT_IDX + offset] = 0;
                continue;
            }
            __half sum(0.0);
            for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
                uint32_t n, oh, ow;
                n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
                oh += oh_lo;
                ow += ow_lo;
                uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
                         soff = ic * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE,
                         doff = (ic * CHL_MUL + chl_mul) * OH * OW + oh * OW + ow +
                                n * DST_BATCH_STRIDE;
#pragma unroll
                for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
                    if (!i || n + i < N) {
                        sum = fma(src[soff], dst_grad[doff], sum);
                    }
                    soff += SRC_BATCH_STRIDE;
                    doff += DST_BATCH_STRIDE;
                }
            }
            if (!offset)
                sum2.x = sum;
            if (offset)
                sum2.y = sum;
        }
    }
    if (nr_thpf == 1) {
        flt_grad[OUT_IDX] = sum2.x;
        // NOTE(review): guard compares against LAST_IDX (== total element
        // count) rather than LAST_IDX - 1 — verify OUT_IDX + 1 cannot land
        // exactly on LAST_IDX, which would write one element out of bounds.
        if (OUT_IDX != LAST_IDX)
            flt_grad[OUT_IDX + 1] = sum2.y;
    } else {
        // Packed __half2 tree reduction, mirroring the float kernel.
        extern __shared__ uint8_t shared_storage[];
        __half2* thread_sum = reinterpret_cast<__half2*>(shared_storage);
        thread_sum += THREADID_X * nr_thpf / 2;
        thread_sum[tid] = sum2;
#pragma unroll
        for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
            bool cond = nr_thpf >= i * 2 && tid < i;
            if (i >= WARP_SIZE) {
                __syncthreads();
            } else {
                cub::WARP_SYNC(0xffffffff);
            }
            if (cond) {
                // fma2 with a multiplier of {1, 1} is a packed half2 add.
                __half2 one = {1.0, 1.0};
                __half2 v0 = thread_sum[tid], v1 = fma2(v0, one, thread_sum[tid + i]);
                thread_sum[tid] = v1;
            }
        }
        if (!tid) {
            flt_grad[OUT_IDX] = thread_sum[0].x;
            if (OUT_IDX != LAST_IDX)
                flt_grad[OUT_IDX + 1] = thread_sum[0].y;
        }
    }
}
#endif
// Map a runtime power-of-2 threads-per-filter-element count (_p, 1..1024) to
// the matching statically instantiated kernel template func<type, _p> and
// return it wrapped in a FixFunction.  Expanded inside the get_kern<>()
// specializations below; _p must be exactly a power of two or we assert.
#define GET_KERN(func, type)                                  \
    FixFunction<type> f_struct;                               \
    switch (_p) {                                             \
        case 1 << 10:                                         \
            f_struct.f = func<type, 1 << 10>;                 \
            break;                                            \
        case 1 << 9:                                          \
            f_struct.f = func<type, 1 << 9>;                  \
            break;                                            \
        case 1 << 8:                                          \
            f_struct.f = func<type, 1 << 8>;                  \
            break;                                            \
        case 1 << 7:                                          \
            f_struct.f = func<type, 1 << 7>;                  \
            break;                                            \
        case 1 << 6:                                          \
            f_struct.f = func<type, 1 << 6>;                  \
            break;                                            \
        case 1 << 5:                                          \
            f_struct.f = func<type, 1 << 5>;                  \
            break;                                            \
        case 1 << 4:                                          \
            f_struct.f = func<type, 1 << 4>;                  \
            break;                                            \
        case 1 << 3:                                          \
            f_struct.f = func<type, 1 << 3>;                  \
            break;                                            \
        case 1 << 2:                                          \
            f_struct.f = func<type, 1 << 2>;                  \
            break;                                            \
        case 1 << 1:                                          \
            f_struct.f = func<type, 1 << 1>;                  \
            break;                                            \
        case 1 << 0:                                          \
            f_struct.f = func<type, 1 << 0>;                  \
            break;                                            \
        default:                                              \
            megdnn_assert(false, "DO NOT IMP CASE FUNCTION!!"); \
    }                                                         \
    return f_struct;
// Thin wrapper so a kernel function pointer can be returned from the
// GET_KERN macro expansion.
template <typename T>
struct FixFunction {
    void (*f)(T*, const T*, const T*, Param);
};
// Select the kernel instantiation for element type T and a power-of-2
// threads-per-filter-element count _p.
template <typename T>
FixFunction<T> get_kern(const uint32_t& _p);
template <>
FixFunction<float> get_kern<float>(const uint32_t& _p) {
    GET_KERN(kern_bwd_filter_float, float);
}
#if CUDA_VERSION >= 9000
// Native __half path (needs half2 arithmetic, available from CUDA 9).
template <>
FixFunction<__half> get_kern<__half>(const uint32_t& _p) {
    GET_KERN(kern_bwd_filter_hf, __half);
}
#endif
// dt_float16 falls back to the generic kernel, which performs the arithmetic
// through dt_float16's operator overloads.
template <>
FixFunction<dt_float16> get_kern<dt_float16>(const uint32_t& _p) {
    GET_KERN(kern_bwd_filter_float, dt_float16);
}
#undef GET_KERN
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
// Host entry point: choose a power-of-2 threads-per-filter-element count
// (nr_thpf) from the amount of reduction work per element, pick the matching
// kernel instantiation, size the grid so every filter element is covered, and
// launch asynchronously on `stream`.
template <typename T>
void run_bwd_filter(
        T* filter_grad, const T* src, const T* dst_grad, const Param& param,
        cudaStream_t stream) {
    void (*kern)(T*, const T*, const T*, Param) = NULL;
    // Heuristic: about BATCH_UNROLL*16 reduction items per thread, capped by
    // the device block-size limit for this kernel.
    uint32_t nr_thread = query_blocksize_for_kernel(get_kern<T>(1024).f),
             nr_thpf = std::min(
                     nr_thread, std::max<uint32_t>(
                                        1, param.out_h * param.out_w * param.batch /
                                                   (BATCH_UNROLL * 16)));
    // find nearest power-of-2 of nr_thpf
    do {
#define CK(_n)                         \
    if (nr_thpf >= _n) {               \
        kern = get_kern<T>(_n).f;      \
        nr_thpf = _n;                  \
        break;                         \
    }
        CK(1 << 10);
        CK(1 << 9);
        CK(1 << 8);
        CK(1 << 7);
        CK(1 << 6);
        CK(1 << 5);
        CK(1 << 4);
        CK(1 << 3);
        CK(1 << 2);
        CK(1 << 1);
        CK(1 << 0);
#undef CK
    } while (0);
    megdnn_assert(kern);
    nr_thread = query_blocksize_for_kernel(kern);
    // Shrink the block until it is a whole number of warps, keeping the
    // kernels' warp-synchronous reduction valid.
    uint32_t nr_flt_per_blk = nr_thread / nr_thpf;
    while (nr_flt_per_blk * nr_thpf % WARP_SIZE)
        --nr_flt_per_blk;
    megdnn_assert(nr_flt_per_blk);
    int nr_block = DIVUP(
            param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk);
    nr_thread = nr_flt_per_blk * nr_thpf;
    // 2x sizing presumably covers the fp16 kernel's __half2 partials (two
    // outputs per group) — TODO confirm against kern_bwd_filter_hf.
    uint32_t shared = nr_thread * 2 * sizeof(T);
    kern<<<nr_block, nr_thread, shared, stream>>>(filter_grad, src, dst_grad, param);
    after_kernel_launch();
}
// Explicit instantiations for the supported element types.
template void run_bwd_filter(
        float*, const float*, const float*, const Param&, cudaStream_t);
#if CUDA_VERSION >= 9000
template void run_bwd_filter(
        __half*, const __half*, const __half*, const Param&, cudaStream_t);
#endif
template void run_bwd_filter(
        dt_float16*, const dt_float16*, const dt_float16*, const Param&, cudaStream_t);
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
5354d6329f5a6719819aac14244c57857bb918c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_hdv3.cuh"
// Hyperdiffusion viscosity pass 3: for every interior grid point, take the
// local maximum of the d3 and d1 diagnostics (stored in wtemp1) over a small
// neighbourhood oriented along `dim`, and store them in wtemp as md3r (tmp4)
// and md1r (tmp5).  One thread handles an npgp[0] x npgp[1] patch of points.
// NOTE(review): many locals (index, k, dt, dx, dy, max3, bfac*, shift, nj)
// are computed but unused here — presumably shared boilerplate across the
// hyperdifvisc* kernels.
__global__ void hyperdifvisc3_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim,int hand)
{
  // compute the global index in the vector from
  // the number of the current block, blockIdx,
  // the number of threads per block, blockDim,
  // and the number of the current thread within the block, threadIdx
  //int i = blockIdx.x * blockDim.x + threadIdx.x;
  //int j = blockIdx.y * blockDim.y + threadIdx.y;
  int iindex = blockIdx.x * blockDim.x + threadIdx.x;
  int i,j;
  int is,js;
  int index,k;
  int ni=p->n[0];
  int nj=p->n[1];
  real dt=p->dt;
  real dy=p->dx[1];
  real dx=p->dx[0];
  //real g=p->g;
  // dt=1.0;
  //dt=0.05;
  //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
  real maxt1=0,max3=0, maxt2=0;
  int ip,jp,ipg,jpg;
  // (ip, jp): patch coordinates of this thread in the npgp-blocked domain.
  jp=iindex/(ni/(p->npgp[0]));
  ip=iindex-(jp*(ni/(p->npgp[0])));
  int bfac1,bfac2,bfac3;
  //int bfac1=(field==rho || field>mom2)+(field>rho && field<energy);
  //int bfac2= (field==rho || field>mom2);
  //int bfac3=(field>rho && field<energy);
  int shift=order*NVAR*(p->n[0])*(p->n[1]);
  // wtemp slot legend (from the original author):
  //tmp1 tmp_nuI
  //tmp2 d3r
  //tmp3 d1r
  //tmp4 md3r
  //tmp5 md1r
  //tmp6 d3l
  //tmp7 d1l
  //tmp8 md3l
  //tmp9 md1l
  //compute md3r and md1r
  //tmp4 md3r
  //tmp5 md1r
  for(ipg=0;ipg<(p->npgp[0]);ipg++)
  for(jpg=0;jpg<(p->npgp[1]);jpg++)
  {
     i=ip*(p->npgp[0])+ipg;
     j=jp*(p->npgp[1])+jpg;
     // Only interior points (two-cell margin on every side) are updated.
     if( i>1 && j>1 && i<((p->n[0])-2) && j<((p->n[1])-2))
     {
        // md3r: max of d3 over the +/-1 neighbourhood along `dim`, centred
        // at (i+1, j+1).
        maxt1=0;
        for(is=-(dim==0); is<=(dim==0); is++)
        for(js=-(dim==1); js<=(dim==1); js++)
        {
           if(wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d3)]>maxt1)
                    maxt1=wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d3)];
        }
        wtemp[fencode_hdv3(p,i,j,tmp4)]=maxt1;
        // md1r: max of d1 over the wider +/-2 neighbourhood along `dim`.
        maxt2=0;
        for(is=-2*(dim==0); is<=2*(dim==0); is++)
        for(js=-2*(dim==1); js<=2*(dim==1); js++)
        {
           if(wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d1)]>maxt2)
                    maxt2=wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d1)];
        }
        wtemp[fencode_hdv3(p,i,j,tmp5)]=maxt2;
     }
  }
  __syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
// Report any pending HIP/CUDA error to stderr, tagged with `label`.
// We synchronise first so that errors from asynchronous operations (kernel
// launches, async copies) are surfaced here instead of going unnoticed, then
// also drain the sticky last-error state.
void checkErrors_hdv3(char *label)
{
  hipError_t err;
  err = hipDeviceSynchronize();
  if (err != hipSuccess)
  {
    // hipGetErrorString returns a runtime-owned constant string; keep it
    // const instead of casting the qualifier away.  A trailing newline keeps
    // successive diagnostics from running together on stderr.
    const char *e = hipGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
  }
  err = hipGetLastError();
  if (err != hipSuccess)
  {
    const char *e = hipGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
  }
}
// Host wrapper: launch hyperdifvisc3_parallel over the whole ni*nj domain
// (one thread per npgp-sized group of grid points) and wait for completion.
// Most parameters are forwarded untouched to the kernel; the host-side
// pointers (w, wnew) are unused here but kept for interface compatibility
// with the other cuhyperdifvisc* wrappers.
// Returns 0 on completion (previously the function was declared int but fell
// off the end without returning a value, which is undefined behaviour if the
// caller reads the result).
int cuhyperdifvisc3(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, real **d_wtemp, real **d_wtemp1, real **d_wtemp2, int field, int dim,int hand)
{
  // Round up so every grid point is covered by a thread.
  int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;

  hipLaunchKernelGGL(( hyperdifvisc3_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim,hand);

  // Block until the kernel has finished so callers can safely read device
  // buffers (or launch dependent kernels) afterwards.
  hipDeviceSynchronize();

  return 0;
}
| 5354d6329f5a6719819aac14244c57857bb918c5.cu | #include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_hdv3.cuh"
// Hyperdiffusion viscosity pass 3: for every interior grid point, take the
// local maximum of the d3 and d1 diagnostics (stored in wtemp1) over a small
// neighbourhood oriented along `dim`, and store them in wtemp as md3r (tmp4)
// and md1r (tmp5).  One thread handles an npgp[0] x npgp[1] patch of points.
// NOTE(review): many locals (index, k, dt, dx, dy, max3, bfac*, shift, nj)
// are computed but unused here — presumably shared boilerplate across the
// hyperdifvisc* kernels.
__global__ void hyperdifvisc3_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order, real *wtemp, real *wtemp1, real *wtemp2, int field, int dim,int hand)
{
  // compute the global index in the vector from
  // the number of the current block, blockIdx,
  // the number of threads per block, blockDim,
  // and the number of the current thread within the block, threadIdx
  //int i = blockIdx.x * blockDim.x + threadIdx.x;
  //int j = blockIdx.y * blockDim.y + threadIdx.y;
  int iindex = blockIdx.x * blockDim.x + threadIdx.x;
  int i,j;
  int is,js;
  int index,k;
  int ni=p->n[0];
  int nj=p->n[1];
  real dt=p->dt;
  real dy=p->dx[1];
  real dx=p->dx[0];
  //real g=p->g;
  // dt=1.0;
  //dt=0.05;
  //enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
  real maxt1=0,max3=0, maxt2=0;
  int ip,jp,ipg,jpg;
  // (ip, jp): patch coordinates of this thread in the npgp-blocked domain.
  jp=iindex/(ni/(p->npgp[0]));
  ip=iindex-(jp*(ni/(p->npgp[0])));
  int bfac1,bfac2,bfac3;
  //int bfac1=(field==rho || field>mom2)+(field>rho && field<energy);
  //int bfac2= (field==rho || field>mom2);
  //int bfac3=(field>rho && field<energy);
  int shift=order*NVAR*(p->n[0])*(p->n[1]);
  // wtemp slot legend (from the original author):
  //tmp1 tmp_nuI
  //tmp2 d3r
  //tmp3 d1r
  //tmp4 md3r
  //tmp5 md1r
  //tmp6 d3l
  //tmp7 d1l
  //tmp8 md3l
  //tmp9 md1l
  //compute md3r and md1r
  //tmp4 md3r
  //tmp5 md1r
  for(ipg=0;ipg<(p->npgp[0]);ipg++)
  for(jpg=0;jpg<(p->npgp[1]);jpg++)
  {
     i=ip*(p->npgp[0])+ipg;
     j=jp*(p->npgp[1])+jpg;
     // Only interior points (two-cell margin on every side) are updated.
     if( i>1 && j>1 && i<((p->n[0])-2) && j<((p->n[1])-2))
     {
        // md3r: max of d3 over the +/-1 neighbourhood along `dim`, centred
        // at (i+1, j+1).
        maxt1=0;
        for(is=-(dim==0); is<=(dim==0); is++)
        for(js=-(dim==1); js<=(dim==1); js++)
        {
           if(wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d3)]>maxt1)
                    maxt1=wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d3)];
        }
        wtemp[fencode_hdv3(p,i,j,tmp4)]=maxt1;
        // md1r: max of d1 over the wider +/-2 neighbourhood along `dim`.
        maxt2=0;
        for(is=-2*(dim==0); is<=2*(dim==0); is++)
        for(js=-2*(dim==1); js<=2*(dim==1); js++)
        {
           if(wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d1)]>maxt2)
                    maxt2=wtemp1[fencode_hdv3(p,i+1+is,j+1+js,d1)];
        }
        wtemp[fencode_hdv3(p,i,j,tmp5)]=maxt2;
     }
  }
  __syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
// Report any pending CUDA error to stderr, tagged with `label`.
// We synchronise first so that errors from asynchronous operations (kernel
// launches, async copies) are surfaced here instead of going unnoticed, then
// also drain the sticky last-error state.
void checkErrors_hdv3(char *label)
{
  cudaError_t err;
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // drop-in replacement with identical behaviour.
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess)
  {
    // cudaGetErrorString returns a runtime-owned constant string; keep it
    // const instead of casting the qualifier away.  A trailing newline keeps
    // successive diagnostics from running together on stderr.
    const char *e = cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
  }
  err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    const char *e = cudaGetErrorString(err);
    fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
  }
}
// Host wrapper: launch hyperdifvisc3_parallel over the whole ni*nj domain
// (one thread per npgp-sized group of grid points) and wait for completion.
// Most parameters are forwarded untouched to the kernel; the host-side
// pointers (w, wnew) are unused here but kept for interface compatibility
// with the other cuhyperdifvisc* wrappers.
// Returns 0 on completion (previously the function was declared int but fell
// off the end without returning a value, which is undefined behaviour if the
// caller reads the result).
int cuhyperdifvisc3(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, real **d_wtemp, real **d_wtemp1, real **d_wtemp2, int field, int dim,int hand)
{
  // Round up so every grid point is covered by a thread.
  int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;

  hyperdifvisc3_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order, *d_wtemp,*d_wtemp1,*d_wtemp2, field, dim,hand);

  // Block until the kernel has finished so callers can safely read device
  // buffers (or launch dependent kernels) afterwards.
  // (cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // drop-in replacement.)
  cudaDeviceSynchronize();

  return 0;
}
|
4e8dbdb5969dd6aa84ec27bad2b165b4af0e05df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ncs/cuda/CUDA.h>
#include <ncs/sim/CUDA.h>
#include "NCS.cuh"
namespace cuda {
// Advance every LIF-style neuron by one timestep: integrate input/synaptic/
// channel currents into the membrane voltage, fire when it crosses threshold
// (then replay the canned spike shape), and decay calcium.  Fire results are
// packed one bit per neuron into neuron_fire_bits, one Bit::Word per warp.
// Requires dynamic shared memory of blockDim.x * sizeof(Bit::Word) bytes
// (one word per thread, grouped per warp) and blockDim.x a multiple of 32.
__global__ void updateNeuronsKernel(const int* old_spike_shape_state,
                                    const float* old_voltage,
                                    const float* old_calcium,
                                    const float* input_current,
                                    const float* synaptic_current,
                                    const float* channel_current,
                                    const float* resting_potential,
                                    const float* voltage_persistence,
                                    const float* dt_over_capacitance,
                                    const unsigned int* spike_shape_length,
                                    const float* calcium_spike_increment,
                                    const float* calcium_persistence,
                                    const float* const* spike_shape,
                                    const float* threshold,
                                    ncs::sim::Bit::Word* neuron_fire_bits,
                                    float* new_voltage,
                                    int* new_spike_shape_state,
                                    float* new_calcium,
                                    float* device_neuron_voltage,
                                    unsigned int num_neurons) {
  extern __shared__ ncs::sim::Bit::Word shared_fire_vector[];
  // Each thread owns one shared word; result_vector_base is this warp's
  // 32-word slice, reduced below into the leader's fire word.
  unsigned int& warp_result = shared_fire_vector[threadIdx.x];
  unsigned int* result_vector_base = shared_fire_vector + warp::index() * 32;
  unsigned int warp_thread = warp::thread();
  // Round the loop bound up to a multiple of 32 so whole warps stay active
  // for the reduction even on the ragged tail.
  unsigned int limit = math::ceiling(num_neurons, 32);
  unsigned int mask = bit::mask(warp_thread);
  for (size_t i = grid::thread(); i < limit; i += grid::stride()) {
    warp_result = 0;
    if (i < num_neurons) {
      int spike_shape_state = old_spike_shape_state[i];
      float voltage = old_voltage[i];
      float calcium = old_calcium[i];
      float total_current = input_current[i] +
                            synaptic_current[i] + channel_current[i];
      if (spike_shape_state < 0) { // Do real computations
        // Leaky integration toward rest plus injected current.
        float vm_rest = resting_potential[i];
        float dv = voltage - vm_rest;
        voltage = vm_rest +
                  dv * voltage_persistence[i] +
                  dt_over_capacitance[i] * total_current;
        if (voltage > threshold[i]) {
          // Fire: start replaying the spike waveform from its last sample
          // and record this neuron's bit for the warp's fire word.
          spike_shape_state = spike_shape_length[i] - 1;
          calcium += calcium_spike_increment[i];
          warp_result = mask;
        }
      }
      if (spike_shape_state >= 0) { // Still following spike shape
        voltage = spike_shape[i][spike_shape_state];
        spike_shape_state--;
      }
      calcium *= calcium_persistence[i];
      new_voltage[i] = voltage;
      new_spike_shape_state[i] = spike_shape_state;
      new_calcium[i] = calcium;
      device_neuron_voltage[i] = voltage;
    }
    // warp::reduceOr presumably ORs the 32 per-lane words of this warp's
    // slice into the leader's slot — confirm against NCS.cuh.
    warp::reduceOr(result_vector_base, warp_thread);
    if (warp::leader()) {
      neuron_fire_bits[bit::word(i)] = warp_result;
    }
  }
}
// Host-side launcher for updateNeuronsKernel (HIP build).
// Derives the launch configuration from the simulator's CUDA helpers,
// allocates one Bit::Word of dynamic shared memory per thread (required by
// the kernel's per-warp fire-bit reduction), launches on the simulator's
// stream, and blocks until the kernel completes.
// All pointer arguments are device pointers; see the kernel for semantics.
void updateNeurons(const int* old_spike_shape_state,
const float* old_voltage,
const float* old_calcium,
const float* input_current,
const float* synaptic_current,
const float* channel_current,
const float* resting_potential,
const float* voltage_persistence,
const float* dt_over_capacitance,
const unsigned int* spike_shape_length,
const float* calcium_spike_increment,
const float* calcium_persistence,
const float* const* spike_shape,
const float* threshold,
ncs::sim::Bit::Word* neuron_fire_bits,
float* new_voltage,
int* new_spike_shape_state,
float* new_calcium,
float* device_neuron_voltage,
unsigned int num_neurons) {
unsigned int threads_per_block =
ncs::sim::CUDA::getThreadsPerBlock(num_neurons);
unsigned int num_blocks = ncs::sim::CUDA::getNumberOfBlocks(num_neurons);
// Kernel requirement: one shared-memory word per thread.
unsigned int shared_memory_size =
sizeof(ncs::sim::Bit::Word) * threads_per_block;
hipLaunchKernelGGL(( updateNeuronsKernel), dim3(num_blocks),
dim3(threads_per_block),
shared_memory_size,
ncs::sim::CUDA::getStream(), old_spike_shape_state,
old_voltage,
old_calcium,
input_current,
synaptic_current,
channel_current,
resting_potential,
voltage_persistence,
dt_over_capacitance,
spike_shape_length,
calcium_spike_increment,
calcium_persistence,
spike_shape,
threshold,
neuron_fire_bits,
new_voltage,
new_spike_shape_state,
new_calcium,
device_neuron_voltage,
num_neurons);
// Synchronous semantics: callers may read outputs immediately on return.
ncs::sim::CUDA::synchronize();
}
} // namespace cuda
| 4e8dbdb5969dd6aa84ec27bad2b165b4af0e05df.cu | #include <ncs/cuda/CUDA.h>
#include <ncs/sim/CUDA.h>
#include "NCS.cuh"
namespace cuda {
// Advance every neuron by one simulation step (CUDA build).
// One thread per neuron; the loop grid-strides over ceiling(num_neurons, 32)
// so each warp stays fully populated for the per-warp fire-bit reduction.
// Dynamic shared memory must hold one Bit::Word per thread in the block.
__global__ void updateNeuronsKernel(const int* old_spike_shape_state,
const float* old_voltage,
const float* old_calcium,
const float* input_current,
const float* synaptic_current,
const float* channel_current,
const float* resting_potential,
const float* voltage_persistence,
const float* dt_over_capacitance,
const unsigned int* spike_shape_length,
const float* calcium_spike_increment,
const float* calcium_persistence,
const float* const* spike_shape,
const float* threshold,
ncs::sim::Bit::Word* neuron_fire_bits,
float* new_voltage,
int* new_spike_shape_state,
float* new_calcium,
float* device_neuron_voltage,
unsigned int num_neurons) {
// Block-wide scratch, one word per thread; this thread's slot is aliased
// by warp_result.
extern __shared__ ncs::sim::Bit::Word shared_fire_vector[];
unsigned int& warp_result = shared_fire_vector[threadIdx.x];
// Start of this warp's 32-entry slice of the scratch vector.
unsigned int* result_vector_base = shared_fire_vector + warp::index() * 32;
unsigned int warp_thread = warp::thread();
// Warp-multiple upper bound; lanes past num_neurons contribute 0 below.
unsigned int limit = math::ceiling(num_neurons, 32);
// Single-bit mask for this lane within the warp's packed fire word.
unsigned int mask = bit::mask(warp_thread);
for (size_t i = grid::thread(); i < limit; i += grid::stride()) {
warp_result = 0;
if (i < num_neurons) {
int spike_shape_state = old_spike_shape_state[i];
float voltage = old_voltage[i];
float calcium = old_calcium[i];
float total_current = input_current[i] +
synaptic_current[i] + channel_current[i];
if (spike_shape_state < 0) { // Do real computations
// Leaky integration toward resting potential plus injected current.
float vm_rest = resting_potential[i];
float dv = voltage - vm_rest;
voltage = vm_rest +
dv * voltage_persistence[i] +
dt_over_capacitance[i] * total_current;
if (voltage > threshold[i]) {
// Threshold crossed: begin replaying the spike waveform.
spike_shape_state = spike_shape_length[i] - 1;
calcium += calcium_spike_increment[i];
warp_result = mask; // record this lane's fire bit
}
}
if (spike_shape_state >= 0) { // Still following spike shape
// Voltage comes from the per-neuron canned waveform table.
voltage = spike_shape[i][spike_shape_state];
spike_shape_state--;
}
// Per-step exponential calcium decay.
calcium *= calcium_persistence[i];
new_voltage[i] = voltage;
new_spike_shape_state[i] = spike_shape_state;
new_calcium[i] = calcium;
device_neuron_voltage[i] = voltage;
}
// OR-reduce lane masks across the warp; NOTE(review): relies on
// warp::reduceOr (NCS.cuh) publishing the warp-wide OR to this lane's
// shared slot with appropriate synchronization — confirm.
warp::reduceOr(result_vector_base, warp_thread);
if (warp::leader()) {
// Single packed write: 32 neurons' fire bits per word.
neuron_fire_bits[bit::word(i)] = warp_result;
}
}
}
// Host-side launcher for updateNeuronsKernel (CUDA build).
// Computes the launch configuration via the simulator's CUDA helpers,
// supplies one Bit::Word of dynamic shared memory per thread (needed by the
// kernel's fire-bit reduction), launches on the simulator stream, and blocks
// until completion. All pointer arguments are device pointers.
void updateNeurons(const int* old_spike_shape_state,
const float* old_voltage,
const float* old_calcium,
const float* input_current,
const float* synaptic_current,
const float* channel_current,
const float* resting_potential,
const float* voltage_persistence,
const float* dt_over_capacitance,
const unsigned int* spike_shape_length,
const float* calcium_spike_increment,
const float* calcium_persistence,
const float* const* spike_shape,
const float* threshold,
ncs::sim::Bit::Word* neuron_fire_bits,
float* new_voltage,
int* new_spike_shape_state,
float* new_calcium,
float* device_neuron_voltage,
unsigned int num_neurons) {
unsigned int threads_per_block =
ncs::sim::CUDA::getThreadsPerBlock(num_neurons);
unsigned int num_blocks = ncs::sim::CUDA::getNumberOfBlocks(num_neurons);
// One shared-memory word per thread, as the kernel requires.
unsigned int shared_memory_size =
sizeof(ncs::sim::Bit::Word) * threads_per_block;
updateNeuronsKernel<<<num_blocks,
threads_per_block,
shared_memory_size,
ncs::sim::CUDA::getStream()>>>(old_spike_shape_state,
old_voltage,
old_calcium,
input_current,
synaptic_current,
channel_current,
resting_potential,
voltage_persistence,
dt_over_capacitance,
spike_shape_length,
calcium_spike_increment,
calcium_persistence,
spike_shape,
threshold,
neuron_fire_bits,
new_voltage,
new_spike_shape_state,
new_calcium,
device_neuron_voltage,
num_neurons);
// Synchronous contract: outputs are ready when this function returns.
ncs::sim::CUDA::synchronize();
}
} // namespace cuda
|
ccf5354a066a738002ca22240a3d6a943a26d033.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
// Explicit template instantiation: generates the float3->float3 variant of
// the separable row-filter wrapper defined in row_filter.h (HIP stream type).
template void linearRow<float3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| ccf5354a066a738002ca22240a3d6a943a26d033.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
// Explicit template instantiation: generates the float3->float3 variant of
// the separable row-filter wrapper defined in row_filter.h (CUDA stream type).
template void linearRow<float3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
bdbdb201b3955f68c80a20bd8e06aa8dc655ca23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY
// ACCESS
// srad kernel
// SRAD stage-1 kernel (HIP build): for each element of the d_Nr x d_Nc image
// d_I (column-major: element ei lives at row + d_Nr * col) compute the four
// directional derivatives and the clamped diffusion coefficient.
// One thread per element, NUMBER_THREADS threads per block; surplus threads
// are masked by the ei < d_Ne guard.
// NOTE(review): d_lambda is unused in this kernel — presumably consumed by
// the companion update kernel; confirm before removing.
__global__ void srad(fp d_lambda, int d_Nr, int d_Nc, long d_Ne,
fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW, fp d_q0sqr, fp *d_c, fp *d_I)
{
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // row index, 0..d_Nr-1 (fastest-varying in the column-major layout)
int col; // column index, 0..d_Nc-1
int iN, iS, jW, jE; // clamped neighbor indices (replicate at borders)
// variables
fp d_Jc;
fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
fp d_c_loc;
fp d_G2, d_L, d_num, d_den, d_qsqr;
// figure out row/col location in new matrix
// The (ei+1) offset plus the correction below maps ei to (row, col) while
// avoiding row == -1 at column boundaries; equivalent to row = ei % d_Nr,
// col = ei / d_Nr except at the last row of each column.
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
// Clamp neighbor indices at image borders (zero-flux boundary).
if(row == 0) {
iN = 0;
}else{
iN = row -1;
}
if(row == d_Nr -1){
iS = d_Nr -1;
}else{
iS = row+1;
}
if(col ==0){
jW = 0;
}else{
jW =col -1;
}
if(col == d_Nc -1){
jE = d_Nc - 1;
}else{
jE = col +1;
}
// directional derivatives, ICOV, diffusion coefficent
d_Jc = d_I[ei]; // get value of the current element
// directional derivates (every element of IMAGE)(try to copy to shared
// memory or temp files)
d_dN_loc =
d_I[iN + d_Nr * col] - d_Jc; // north direction derivative
d_dS_loc =
d_I[iS + d_Nr * col] - d_Jc; // south direction derivative
d_dW_loc =
d_I[row + d_Nr * jW] - d_Jc; // west direction derivative
d_dE_loc =
d_I[row + d_Nr * jE] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
d_G2 = (d_dN_loc * d_dN_loc + d_dS_loc * d_dS_loc +
d_dW_loc * d_dW_loc + d_dE_loc * d_dE_loc) /
(d_Jc * d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
d_L = (d_dN_loc + d_dS_loc + d_dW_loc + d_dE_loc) /
d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
// NOTE(review): the 0.5/1.0/0.25 literals are doubles and promote the
// expressions to double precision when fp is float — verify intended.
d_num = (0.5 * d_G2) -
((1.0 / 16.0) *
(d_L * d_L)); // num (based on gradient and laplacian)
d_den = 1 + (0.25 * d_L); // den (based on laplacian)
d_qsqr = d_num / (d_den * d_den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
d_den = (d_qsqr - d_q0sqr) /
(d_q0sqr * (1 + d_q0sqr)); // den (based on qsqr and q0sqr)
d_c_loc = 1.0 / (1.0 + d_den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (d_c_loc < 0) { // if diffusion coefficient < 0
d_c_loc = 0; // ... set to 0
} else if (d_c_loc > 1) { // if diffusion coefficient > 1
d_c_loc = 1; // ... set to 1
}
// save data to global memory
d_dN[ei] = d_dN_loc;
d_dS[ei] = d_dS_loc;
d_dW[ei] = d_dW_loc;
d_dE[ei] = d_dE_loc;
d_c[ei] = d_c_loc;
}
}
| bdbdb201b3955f68c80a20bd8e06aa8dc655ca23.cu | // BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY
// ACCESS
// srad kernel
// SRAD stage-1 kernel (CUDA build): for each element of the d_Nr x d_Nc
// image d_I (column-major: element ei lives at row + d_Nr * col) compute the
// four directional derivatives and the clamped diffusion coefficient.
// One thread per element, NUMBER_THREADS threads per block; surplus threads
// are masked by the ei < d_Ne guard.
// NOTE(review): d_lambda is unused in this kernel — presumably consumed by
// the companion update kernel; confirm before removing.
__global__ void srad(fp d_lambda, int d_Nr, int d_Nc, long d_Ne,
fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW, fp d_q0sqr, fp *d_c, fp *d_I)
{
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // row index, 0..d_Nr-1 (fastest-varying in the column-major layout)
int col; // column index, 0..d_Nc-1
int iN, iS, jW, jE; // clamped neighbor indices (replicate at borders)
// variables
fp d_Jc;
fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
fp d_c_loc;
fp d_G2, d_L, d_num, d_den, d_qsqr;
// figure out row/col location in new matrix
// The (ei+1) offset plus the correction below maps ei to (row, col) while
// avoiding row == -1 at column boundaries; equivalent to row = ei % d_Nr,
// col = ei / d_Nr except at the last row of each column.
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
// Clamp neighbor indices at image borders (zero-flux boundary).
if(row == 0) {
iN = 0;
}else{
iN = row -1;
}
if(row == d_Nr -1){
iS = d_Nr -1;
}else{
iS = row+1;
}
if(col ==0){
jW = 0;
}else{
jW =col -1;
}
if(col == d_Nc -1){
jE = d_Nc - 1;
}else{
jE = col +1;
}
// directional derivatives, ICOV, diffusion coefficent
d_Jc = d_I[ei]; // get value of the current element
// directional derivates (every element of IMAGE)(try to copy to shared
// memory or temp files)
d_dN_loc =
d_I[iN + d_Nr * col] - d_Jc; // north direction derivative
d_dS_loc =
d_I[iS + d_Nr * col] - d_Jc; // south direction derivative
d_dW_loc =
d_I[row + d_Nr * jW] - d_Jc; // west direction derivative
d_dE_loc =
d_I[row + d_Nr * jE] - d_Jc; // east direction derivative
// normalized discrete gradient mag squared (equ 52,53)
d_G2 = (d_dN_loc * d_dN_loc + d_dS_loc * d_dS_loc +
d_dW_loc * d_dW_loc + d_dE_loc * d_dE_loc) /
(d_Jc * d_Jc); // gradient (based on derivatives)
// normalized discrete laplacian (equ 54)
d_L = (d_dN_loc + d_dS_loc + d_dW_loc + d_dE_loc) /
d_Jc; // laplacian (based on derivatives)
// ICOV (equ 31/35)
// NOTE(review): the 0.5/1.0/0.25 literals are doubles and promote the
// expressions to double precision when fp is float — verify intended.
d_num = (0.5 * d_G2) -
((1.0 / 16.0) *
(d_L * d_L)); // num (based on gradient and laplacian)
d_den = 1 + (0.25 * d_L); // den (based on laplacian)
d_qsqr = d_num / (d_den * d_den); // qsqr (based on num and den)
// diffusion coefficent (equ 33) (every element of IMAGE)
d_den = (d_qsqr - d_q0sqr) /
(d_q0sqr * (1 + d_q0sqr)); // den (based on qsqr and q0sqr)
d_c_loc = 1.0 / (1.0 + d_den); // diffusion coefficient (based on den)
// saturate diffusion coefficent to 0-1 range
if (d_c_loc < 0) { // if diffusion coefficient < 0
d_c_loc = 0; // ... set to 0
} else if (d_c_loc > 1) { // if diffusion coefficient > 1
d_c_loc = 1; // ... set to 1
}
// save data to global memory
d_dN[ei] = d_dN_loc;
d_dS[ei] = d_dS_loc;
d_dW[ei] = d_dW_loc;
d_dE[ei] = d_dE_loc;
d_c[ei] = d_c_loc;
}
}
|
5f90c67bddd7c7b8d5aee8c57c756f033e2c6904.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "blake2b.cu"
#include "eqcuda.hpp"
typedef uint16_t u16;
typedef uint64_t u64;
//#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP runtime call and optionally terminate the process.
// `code` is the status returned by the HIP API; `file`/`line` identify the
// failing call site for the diagnostic.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code == hipSuccess)
return;
fprintf(stderr,"GPU assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1<<BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS+1+1;
static const u32 SLOTRANGE = 1<<SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE-1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1<<RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS-1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES+HASHESPERBLAKE-1)/HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 1;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
// Packed layout of bid_s0_s1_x (most- to least-significant):
//   [bucketid | slotid0 | slotid1 (| xhash when XINTREE)]
// with SLOTBITS bits per slot id and RESTBITS bits for xhash.
struct tree {
u32 bid_s0_s1_x; // manual bitfields
// Leaf constructor (with XINTREE): raw index plus rest-bits tag.
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
// Leaf constructor: raw index only.
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
#ifdef XINTREE
// Internal-node constructor; the #ifdef selects which signature exists,
// both sharing the closing brace below.
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
// Leaf index (strips the xhash tag when XINTREE packs one).
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
// Bucket of the previous layer holding both children.
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
// First child's slot within the bucket.
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS+RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
// Second child's slot within the bucket.
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
// Rest-bits tag (low RESTBITS bits).
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
// Cheap probabilistic test that two trees reference disjoint leaf sets:
// true when buckets differ or both slot ids differ.
__device__ bool prob_disjoint(const tree other) const {
tree xort(bid_s0_s1_x ^ other.bid_s0_s1_x);
return xort.bucketid() || (xort.slotid0() && xort.slotid1());
// next two tests catch much fewer cases and are therefore skipped
// && slotid0() != other.slotid1() && slotid1() != other.slotid0()
}
};
// One 32-bit unit of hash state, addressable either as a word (for fast
// XOR/compare) or as individual bytes (for bit-field extraction).
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
// Bucket slot used on even rounds: tree node plus the (larger) round-0-style
// hash remainder.
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
// Bucket slot used on odd rounds: tree node plus the smaller hash remainder.
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// Host-side twin of hashsize(): bytes of hash remaining after round r,
// for 0 <= r < WK. XINTREE builds store the rest bits in the tree node,
// shrinking the stored hash by RESTBITS.
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 remaining_bits = WN - (r+1) * DIGITBITS;
#else
const u32 remaining_bits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (remaining_bits + 7) >> 3;  // ceil(bits / 8)
}
// Device-side: bytes of hash remaining after round r, for 0 <= r < WK.
// XINTREE builds store the rest bits in the tree node, shrinking the
// stored hash by RESTBITS.
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 remaining_bits = WN - (r+1) * DIGITBITS;
#else
const u32 remaining_bits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (remaining_bits + 7) >> 3;  // ceil(bits / 8)
}
// Host-side: number of 32-bit hashunits needed to hold `bytes` bytes.
u32 hhashwords(u32 bytes) {
return (bytes + 3) >> 2;  // ceil(bytes / 4), identical for unsigned input
}
// Device-side: number of 32-bit hashunits needed to hold `bytes` bytes.
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) >> 2;  // ceil(bytes / 4), identical for unsigned input
}
// manages hash and tree data
// Device pointers to per-round bucket arrays: even rounds (0,2,...) use
// trees0 with the wider slot0, odd rounds use trees1 with slot1.
struct htalloc {
bucket0 *trees0[(WK+1)/2];
bucket1 *trees1[WK/2];
};
typedef u32 bsizes[NBUCKETS];
// Full solver state. The object itself lives in device memory (device
// methods dereference `this`); the host constructs/initializes it via
// helper code outside this struct.
struct equi {
blake2b_state blake_ctx; // seeded blake2b state for the current header+nonce
htalloc hta;             // per-round bucket storage
bsizes *nslots;          // per-round bucket fill counters, indexed [round&1][bucket]
proof *sols;             // solution index arrays
u32 nsols;               // number of solutions found (may exceed MAXSOLS)
u32 nthreads;            // total GPU threads used by the digit kernels
equi(const u32 n_threads) {
nthreads = n_threads;
}
// Re-seed the blake2b state for a new header/nonce and reset counters.
// NOTE(review): this memset clears only NBUCKETS counters, i.e. one
// round's worth; nslots is read as nslots[0] and nslots[1] elsewhere —
// verify the odd-round counters are zeroed before first use.
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
/*
void setheadernonce(const char *headernonce, const u32 len) {
setheader(&blake_ctx, headernonce);
nsols = 0;
}
*/
// Read-and-reset the even-round fill count of bucket `bid`, clamped to
// NSLOTS (atomicAdd in the digit kernels may overshoot).
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
// Same as getnslots0 but for the odd-round counters.
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
// Swap the two halves of `indices` (each `size` long) so the smaller
// leading index comes first; always returns false so it can be chained
// into the || cascades below.
__device__ bool orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i=0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size+i];
indices[size+i] = tmp;
}
}
return false;
}
// listindicesN: expand tree node `t` (rooted at round N) into its 2^N leaf
// indices in canonical order; return true if a duplicate index invalidates
// the candidate solution.
__device__ bool listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
return false;
}
__device__ bool listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
return listindices1(buck[t.slotid0()].attr, indices) ||
listindices1(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
return listindices2(buck[t.slotid0()].attr, indices) ||
listindices2(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
return listindices3(buck[t.slotid0()].attr, indices) ||
listindices3(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
return listindices4(buck[t.slotid0()].attr, indices) ||
listindices4(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
return listindices5(buck[t.slotid0()].attr, indices) ||
listindices5(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
return listindices6(buck[t.slotid0()].attr, indices) ||
listindices6(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
return listindices7(buck[t.slotid0()].attr, indices) ||
listindices7(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
return listindices8(buck[t.slotid0()].attr, indices) ||
listindices8(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
// Validate a final-round collision: expand to the full proof, reject it on
// duplicate indices, otherwise claim a solution slot and store the proof.
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
if (listindices9(t, prf)) return;
#elif WK==7
if (listindices7(t, prf)) return;
#elif WK==5
if (listindices5(t, prf)) return;
#elif WK==3
if (listindices3(t, prf)) return;
#else
#error not implemented
#endif
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==7
listindices7(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#elif WK==3
listindices3(t, sols[soli]);
#else
#error not implemented
#endif
}
// Debug aid: print a histogram/sparkline of bucket fill levels after
// round r. Compiled out unless HIST/SPARK/LOGSPARK is defined.
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(hipMemcpy(ns, nslots[r&1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS-6);
binsizes[bsize]++;
}
for (u32 i=0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// Per-round layout helper: byte offsets and word counts for reading the
// previous round's hashes and writing this round's shortened hashes.
struct htlayout {
htalloc hta;
u32 prevhashunits; // hashunits stored per slot in the previous round (0 for round 0)
u32 nexthashunits; // hashunits to store this round
u32 dunits;        // hashunits dropped this round (prev - next)
u32 prevbo;        // byte offset of the first meaningful byte in prev hash
u32 nextbo;        // byte offset of the first meaningful byte in next hash
__device__ htlayout(equi *eq, u32 r): hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
// Extract the rest bits of an even-round slot; the byte/shift variant is
// selected by the build's DIGITBITS/RESTBITS configuration.
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 8 == 4 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3f) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
// Extract the rest bits of an odd-round slot.
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 4 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif DIGITBITS % 4 == 0 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#elif DIGITBITS % 4 == 0 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif DIGITBITS % 4 == 0 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3) << 8 | pslot->hash->bytes[prevbo+1];
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
// Fast collision filter: compare only the last stored hash word.
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits-1].word == hash1[prevhashunits-1].word;
}
};
// Per-thread scratch used while scanning a bucket for slot pairs that
// share the same rest bits, either as a bitmap or as chained slot lists.
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];   // head of the slot chain per rest-bits value
xslot nextxhashslot[NSLOTS]; // next pointer per slot (xnil terminates)
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
// byte-wise memset is valid here: xnil is all-ones in every byte
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
// Register slot s1 under rest-bits value xh and prime iteration over all
// previously registered slots with the same xh.
__device__ void addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
#endif
}
// True while more colliding slots remain to visit.
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
// Return the next colliding slot id and advance the iterator.
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
// Round 0: generate all NHASHES blake2b hashes and distribute each WN-bit
// hash into the bucket selected by its top BUCKBITS bits. Threads
// grid-stride over blake blocks; overflowing buckets silently drop slots
// (counter is clamped on read).
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
// Each blake invocation yields HASHESPERBLAKE equihash hashes.
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN/8;
// Bucket id = top BUCKBITS of the hash; variant chosen by build config.
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#ifdef XINTREE
const u32 xhash = ph[1] & 0xf;
#endif
#else
#error not implemented
#endif
// Claim a slot; discard the hash if the bucket is already full.
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
// Store only the trailing hashbytes of the hash (bucket bits implied).
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
// Odd round r: scan every even-round bucket for slot pairs whose rest bits
// match, XOR their hashes, and file the shortened results into the
// odd-round buckets. Threads grid-stride over buckets.
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
// Visit all earlier slots sharing pslot1's rest bits.
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
// Skip pairs whose remaining hashes are identical (would zero out).
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Next bucket id / rest bits from the XOR of the two hashes;
// variant chosen by build configuration.
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (xhash = bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) & 0xf) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 2
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 6;
#else
#error not implemented
#endif
// Claim a slot in the destination bucket; drop on overflow.
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
// Store the XORed hash, dropping the leading dunits words consumed
// by this round's bucket/rest bits.
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r-1)/2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]);
u32 xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 6)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS-1;
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) { // assume WK odd
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash) && pslot0->attr.prob_disjoint(pslot1->attr)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) : threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleYield));
checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1)));
for (u32 r=0; r < WK; r++)
if ((r&1) == 0)
eq->hta.trees0[r/2] = (bucket0 *)(heap0 + r/2);
else
eq->hta.trees1[r/2] = (bucket1 *)(heap1 + r/2);
checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(hipMemset((void*)eq->nslots, 0, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi)));
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
}
eq_cuda_context::~eq_cuda_context()
{
checkCudaErrors(hipFree(eq->nslots));
checkCudaErrors(hipFree(eq->sols));
checkCudaErrors(hipFree(eq->hta.trees0[0]));
checkCudaErrors(hipFree(eq->hta.trees1[0]));
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
delete eq;
}
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
proof sols[MAXSOLS];
/*
int nonce = 0;
char headernonce[HEADERNONCELEN];
u32 hdrlen = strlen(header);
memcpy(headernonce, header, hdrlen);
memset(headernonce+hdrlen, 0, sizeof(headernonce)-hdrlen);
//for (int r = 0; r < range; r++) {
//int r = 0;
hipEventRecord(start, NULL);
((u32 *)headernonce)[32] = htole32(nonce+r);
eq->setheadernonce(headernonce, sizeof(headernonce));
checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice));
*/
checkCudaErrors(hipSetDevice(device_id));
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( digitH), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
hipLaunchKernelGGL(( digit_1), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit2), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit3), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit4), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit5), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit6), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit7), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
hipLaunchKernelGGL(( digit8), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
#else
for (u32 r=1; r < WK; r++) {
r&1 ? hipLaunchKernelGGL(( digitO), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq, r)
: hipLaunchKernelGGL(( digitE), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq, r);
}
#endif
//}
hipLaunchKernelGGL(( digitK), dim3(totalblocks),dim3(threadsperblock) , 0, 0, device_eq);
/*
checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
u32 maxsols = min(MAXSOLS, eq->nsols);
checkCudaErrors(hipMemcpy(sols, eq->sols, maxsols * sizeof(proof), hipMemcpyDeviceToHost));
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
*/
checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(sols, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = sols[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
#ifdef CUDA_DEBUG
u32 s, nsols, ndupes;
for (s = nsols = ndupes = 0; s < MAXSOLS; s++) {
if (duped(sols[s])) {
ndupes++;
continue;
}
nsols++;
if (true) {
for (int i = 0; i < PROOFSIZE; i++)
printf(" %jx", (uintmax_t)sols[s][i]);
printf("\n");
}
}
#endif
/*
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = sols[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
*/
hashdonef();
#ifdef CUDA_DEBUG
float duration;
hipEventElapsedTime(&duration, start, stop);
printf("%d rounds completed in %.3f seconds.\n", WK, duration / 1000.0f);
#endif
}
#ifdef CUDA_DEBUG
#include <unistd.h>
int main(int argc, char **argv) {
eq_cuda_context * cuda = new eq_cuda_context(1024, 1, 0);
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> functionFound = [] (const std::vector<uint32_t>& index_vector, size_t cbitlen, const unsigned char* compressed_sol)
{
};
std::function<bool()> functionCancel = []() {
return false;
};
std::function<void(void)> functionDone = []() {
};
cuda->solve("",
0,
"",
0,
functionCancel,
functionFound,
functionDone);
return 0;
}
#endif | 5f90c67bddd7c7b8d5aee8c57c756f033e2c6904.cu | // Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#include "../cpu_tromp/equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "blake2b.cu"
#include "eqcuda.hpp"
typedef uint16_t u16;
typedef uint64_t u64;
//#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1<<BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS+1+1;
static const u32 SLOTRANGE = 1<<SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE-1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1<<RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS-1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES+HASHESPERBLAKE-1)/HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 1;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
u32 bid_s0_s1_x; // manual bitfields
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS+RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
__device__ bool prob_disjoint(const tree other) const {
tree xort(bid_s0_s1_x ^ other.bid_s0_s1_x);
return xort.bucketid() || (xort.slotid0() && xort.slotid1());
// next two tests catch much fewer cases and are therefore skipped
// && slotid0() != other.slotid1() && slotid1() != other.slotid0()
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r+1) * DIGITBITS;
#else
const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r+1) * DIGITBITS;
#else
const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK+1)/2];
bucket1 *trees1[WK/2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) {
setheader(&blake_ctx, header, len, nonce, nlen);
checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
/*
void setheadernonce(const char *headernonce, const u32 len) {
setheader(&blake_ctx, headernonce);
nsols = 0;
}
*/
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ bool orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i=0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size+i];
indices[size+i] = tmp;
}
}
return false;
}
__device__ bool listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
return false;
}
__device__ bool listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
return listindices1(buck[t.slotid0()].attr, indices) ||
listindices1(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
return listindices2(buck[t.slotid0()].attr, indices) ||
listindices2(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
return listindices3(buck[t.slotid0()].attr, indices) ||
listindices3(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
return listindices4(buck[t.slotid0()].attr, indices) ||
listindices4(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
return listindices5(buck[t.slotid0()].attr, indices) ||
listindices5(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
return listindices6(buck[t.slotid0()].attr, indices) ||
listindices6(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
return listindices7(buck[t.slotid0()].attr, indices) ||
listindices7(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ bool listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
return listindices8(buck[t.slotid0()].attr, indices) ||
listindices8(buck[t.slotid1()].attr, indices+size) ||
orderindices(indices, size) || indices[0] == indices[size];
}
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
if (listindices9(t, prf)) return;
#elif WK==7
if (listindices7(t, prf)) return;
#elif WK==5
if (listindices5(t, prf)) return;
#elif WK==3
if (listindices3(t, prf)) return;
#else
#error not implemented
#endif
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==7
listindices7(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#elif WK==3
listindices3(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(cudaMemcpy(ns, nslots[r&1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS-6);
binsizes[bsize]++;
}
for (u32 i=0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r): hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 8 == 4 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 4 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3f) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif DIGITBITS % 8 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif DIGITBITS % 4 == 0 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif DIGITBITS % 4 == 0 && RESTBITS == 6
return pslot->hash->bytes[prevbo] & 0x3f;
#elif DIGITBITS % 4 == 0 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif DIGITBITS % 4 == 0 && RESTBITS == 10
return (pslot->hash->bytes[prevbo] & 0x3) << 8 | pslot->hash->bytes[prevbo+1];
#elif RESTBITS == 0
return 0;
#else
#error non implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits-1].word == hash1[prevhashunits-1].word;
}
};
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ void addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN/8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#ifdef XINTREE
const u32 xhash = ph[1] & 0xf;
#endif
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
// Generic odd round r (r & 1 == 1): pair slots within each round-(r-1)
// bucket (trees0) whose restrict bits agree, XOR the remaining hash words,
// and store the pair into a round-r bucket (trees1).
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
// Visit every earlier slot in this bucket with the same restrict bits.
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
// Fully equal remainders can only lead to trivial (duplicate-index)
// solutions, so skip them.
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Extract the next BUCKBITS (destination bucket) and RESTBITS (xhash)
// from the XOR of the two hashes; the byte layout is config-specific.
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (xhash = bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) & 0xf) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 2
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 6;
#else
#error not implemented
#endif
// Claim a slot in the destination bucket; drop the pair on overflow.
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
// Store the surviving (undigested) hash words of the pair.
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
// Generic even round r (r & 1 == 0): mirror of digitO with the trees1
// (input) and trees0 (output) roles swapped.
// NOTE(review): under XINTREE, `xhash` is only declared in the first
// configuration branch below; other configs combined with XINTREE would
// not compile — presumably those combinations are never built. Verify.
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r-1)/2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
// Fully equal remainders give only trivial solutions; skip.
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
// Config-specific extraction of the destination bucket bits.
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]);
u32 xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN % 24 == 0 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 6)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 2;
#else
#error not implemented
#endif
// Claim a slot in the destination bucket; drop the pair on overflow.
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
// Store the surviving hash words of the pair.
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask: NBUCKETS is a power of two, so (x & BUCKMASK) == x % NBUCKETS
static const u32 BUCKMASK = NBUCKETS-1;
// Unrolled round 1 (WN=200, BUCKBITS=16, RESTBITS=4, XINTREE).
// __byte_perm(xor0, 0, 0x0123) byte-swaps the first XOR word so the next
// bucket bits and 4-bit xhash can be extracted with shifts and masks.
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;  // equal remainders => trivial solution; skip
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;  // destination bucket overflow: drop the pair
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
// Unrolled round 2: collide trees1[0] slots into trees0[1]; the first XOR
// word is kept (xs.hash[0]) since this round digests no whole word.
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;  // equal remainders => trivial solution; skip
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;  // destination bucket overflow: drop the pair
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
// Unrolled round 3: collide trees0[1] slots into trees1[1]; the bucket and
// xhash bits straddle the first two XOR words, merged via __byte_perm.
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;  // equal remainders => trivial solution; skip
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;  // destination bucket overflow: drop the pair
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
// Unrolled round 4: collide trees1[1] slots into trees0[2]; the first XOR
// word is kept since this round digests no whole word.
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;  // equal remainders => trivial solution; skip
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;  // destination bucket overflow: drop the pair
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
// Unrolled round 5: collide trees0[2] slots into trees1[2]; the first XOR
// word is fully digested, so storage starts at xor1.
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;  // equal remainders => trivial solution; skip
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;  // destination bucket overflow: drop the pair
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
// Unrolled round 6: collide trees1[2] slots into trees0[3]; two hash words
// survive (xor1 and the XOR of word 2).
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash1(pslot1)); cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;  // equal remainders => trivial solution; skip
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;  // destination bucket overflow: drop the pair
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
// Unrolled round 7: collide round-6 (trees0[3]) slots and write the two
// surviving XORed hash words into trees1[3].  Behaviour is identical to
// the generic odd-round path specialised for this round's bit layout.
__global__ void digit7(equi *eq) {
  equi::htlayout layout(eq, 7);
  equi::collisiondata collisions;
  const u32 tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bid = tid; bid < NBUCKETS; bid += eq->nthreads) {
    collisions.clear();
    slot0 *bucket = layout.hta.trees0[3][bid];
    const u32 slotcount = eq->getnslots0(bid);
    for (u32 sb = 0; sb < slotcount; sb++) {
      const slot0 *slotB = bucket + sb;
      collisions.addslot(sb, layout.getxhash0(slotB));
      while (collisions.nextcollision()) {
        const u32 sa = collisions.slot();
        const slot0 *slotA = bucket + sa;
        if (layout.equal(slotA->hash, slotB->hash))
          continue;  // fully equal remainders give only trivial solutions
        const u32 w0 = slotA->hash->word ^ slotB->hash->word;
        const u32 perm = __byte_perm(w0, 0, 0x4012);
        const u32 destbucket = perm >> 4 & BUCKMASK;
        const u32 resthash = perm & 0xf;
        const u32 destslot = atomicAdd(&eq->nslots[1][destbucket], 1);
        if (destslot >= NSLOTS)
          continue;  // destination bucket overflow: drop the pair
        slot1 &xs = layout.hta.trees1[3][destbucket][destslot];
        xs.attr = tree(bid, sa, sb, resthash);
        xs.hash[0].word = w0;
        xs.hash[1].word = slotA->hash[1].word ^ slotB->hash[1].word;
      }
    }
  }
}
// Unrolled round 8: collide round-7 (trees1[3]) slots; only one hash word
// survives the XOR and is written into trees0[4].  Behaviour is identical
// to the generic even-round path specialised for this round's bit layout.
__global__ void digit8(equi *eq) {
  equi::htlayout layout(eq, 8);
  equi::collisiondata collisions;
  const u32 tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (u32 bid = tid; bid < NBUCKETS; bid += eq->nthreads) {
    collisions.clear();
    slot1 *bucket = layout.hta.trees1[3][bid];
    const u32 slotcount = eq->getnslots1(bid);
    for (u32 sb = 0; sb < slotcount; sb++) {
      const slot1 *slotB = bucket + sb;
      collisions.addslot(sb, layout.getxhash1(slotB));
      while (collisions.nextcollision()) {
        const u32 sa = collisions.slot();
        const slot1 *slotA = bucket + sa;
        if (layout.equal(slotA->hash, slotB->hash))
          continue;  // fully equal remainders give only trivial solutions
        const u32 w0 = slotA->hash->word ^ slotB->hash->word;
        const u32 w1 = slotA->hash[1].word ^ slotB->hash[1].word;
        const u32 perm = __byte_perm(w0, w1, 0x3456);
        const u32 destbucket = perm >> 16;
        const u32 resthash = perm >> 12 & 0xf;
        const u32 destslot = atomicAdd(&eq->nslots[0][destbucket], 1);
        if (destslot >= NSLOTS)
          continue;  // destination bucket overflow: drop the pair
        slot0 &xs = layout.hta.trees0[4][destbucket][destslot];
        xs.attr = tree(bid, sa, sb, resthash);
        xs.hash[0].word = w1;
      }
    }
  }
}
#endif
// Final round: scan round-(WK-1) buckets for pairs whose entire remaining
// hash matches; pairs whose index trees are probably disjoint become
// solution candidates (full duplicate check happens later on the host).
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
for (cd.addslot(s1, htl.getxhash0(pslot1)); cd.nextcollision(); ) { // assume WK odd
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
// Unlike earlier rounds, a FULL match is what we want here.
if (htl.equal(pslot0->hash, pslot1->hash) && pslot0->attr.prob_disjoint(pslot1->attr)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
// Construct the solver context on the given device: allocate the two
// alternating round heaps, the per-bucket slot counters, the solution
// buffer, the device-side equi mirror, and the timing events.
// tpb * blocks determines eq->nthreads used by all kernels.
eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) : threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
eq = new equi(threadsperblock * totalblocks);
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleYield));
checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0)));
checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1)));
// Even rounds live in heap0 (trees0), odd rounds in heap1 (trees1).
for (u32 r=0; r < WK; r++)
if ((r&1) == 0)
eq->hta.trees0[r/2] = (bucket0 *)(heap0 + r/2);
else
eq->hta.trees1[r/2] = (bucket1 *)(heap1 + r/2);
checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(cudaMemset((void*)eq->nslots, 0, 2 * NBUCKETS * sizeof(u32)));
checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof)));
checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi)));
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
}
// Release device and host resources.  device_eq and the start/stop events
// are not freed individually; presumably cudaDeviceReset() is relied upon
// to tear the whole context down — TODO confirm this is intentional.
eq_cuda_context::~eq_cuda_context()
{
checkCudaErrors(cudaFree(eq->nslots));
checkCudaErrors(cudaFree(eq->sols));
// trees0[0]/trees1[0] alias heap0/heap1, so this frees both heaps.
checkCudaErrors(cudaFree(eq->hta.trees0[0]));
checkCudaErrors(cudaFree(eq->hta.trees1[0]));
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
delete eq;
}
// Run one full Equihash solve for (header, nonce):
//   1. seed eq with the header/nonce and copy it to the device,
//   2. launch round 0 (digitH), rounds 1..WK-1 (unrolled or generic), digitK,
//   3. copy candidates back and invoke solutionf per solution, then hashdonef.
// All launches go to the default stream, so the blocking cudaMemcpy below
// implicitly waits for kernel completion.
void eq_cuda_context::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
proof sols[MAXSOLS];
// Legacy single-header driver kept for reference:
/*
int nonce = 0;
char headernonce[HEADERNONCELEN];
u32 hdrlen = strlen(header);
memcpy(headernonce, header, hdrlen);
memset(headernonce+hdrlen, 0, sizeof(headernonce)-hdrlen);
//for (int r = 0; r < range; r++) {
//int r = 0;
cudaEventRecord(start, NULL);
((u32 *)headernonce)[32] = htole32(nonce+r);
eq->setheadernonce(headernonce, sizeof(headernonce));
checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice));
*/
checkCudaErrors(cudaSetDevice(device_id));
// Resets counters/state host-side, then mirror the whole equi to the device.
eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len);
checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice));
digitH<<<totalblocks,threadsperblock >>>(device_eq);
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
digit_1<<<totalblocks,threadsperblock >>>(device_eq);
digit2<<<totalblocks,threadsperblock >>>(device_eq);
digit3<<<totalblocks,threadsperblock >>>(device_eq);
digit4<<<totalblocks,threadsperblock >>>(device_eq);
digit5<<<totalblocks,threadsperblock >>>(device_eq);
digit6<<<totalblocks,threadsperblock >>>(device_eq);
digit7<<<totalblocks,threadsperblock >>>(device_eq);
digit8<<<totalblocks,threadsperblock >>>(device_eq);
#else
for (u32 r=1; r < WK; r++) {
r&1 ? digitO<<<totalblocks,threadsperblock >>>(device_eq, r)
: digitE<<<totalblocks,threadsperblock >>>(device_eq, r);
}
#endif
//}
digitK<<<totalblocks,threadsperblock >>>(device_eq);
/*
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
u32 maxsols = min(MAXSOLS, eq->nsols);
checkCudaErrors(cudaMemcpy(sols, eq->sols, maxsols * sizeof(proof), cudaMemcpyDeviceToHost));
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
*/
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
// NOTE(review): copies all MAXSOLS proofs regardless of eq->nsols; the tail
// beyond nsols may be uninitialized device memory (harmless, never read).
checkCudaErrors(cudaMemcpy(sols, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost));
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = sols[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;  // caller aborted; hashdonef is intentionally skipped
}
#ifdef CUDA_DEBUG
// Debug dump: count and print non-duplicate solutions.
u32 s, nsols, ndupes;
for (s = nsols = ndupes = 0; s < MAXSOLS; s++) {
if (duped(sols[s])) {
ndupes++;
continue;
}
nsols++;
if (true) {
for (int i = 0; i < PROOFSIZE; i++)
printf(" %jx", (uintmax_t)sols[s][i]);
printf("\n");
}
}
#endif
/*
for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++)
{
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = sols[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
if (cancelf()) return;
}
*/
hashdonef();
#ifdef CUDA_DEBUG
// NOTE(review): start/stop are never recorded in this path, so the elapsed
// time printed here is not meaningful — TODO confirm.
float duration;
cudaEventElapsedTime(&duration, start, stop);
printf("%d rounds completed in %.3f seconds.\n", WK, duration / 1000.0f);
#endif
}
#ifdef CUDA_DEBUG
#include <unistd.h>
/*
 * Stand-alone debug driver: run one solve on device 0 with an empty
 * header/nonce, using 1 block of 1024 threads, with no-op callbacks.
 * Fix vs. original: the eq_cuda_context was leaked (new without delete);
 * it is now deleted so its destructor releases the device allocations.
 */
int main(int argc, char **argv) {
  eq_cuda_context * cuda = new eq_cuda_context(1024, 1, 0);
  // Solution callback: intentionally a no-op in this smoke test.
  std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> functionFound = [] (const std::vector<uint32_t>& index_vector, size_t cbitlen, const unsigned char* compressed_sol)
  {
  };
  // Never cancel.
  std::function<bool()> functionCancel = []() {
    return false;
  };
  // Completion callback: no-op.
  std::function<void(void)> functionDone = []() {
  };
  cuda->solve("",
    0,
    "",
    0,
    functionCancel,
    functionFound,
    functionDone);
  delete cuda;  // fix: release the context (device buffers freed in dtor)
  return 0;
}
#endif |
46aa8a5e3862375a8cab82abcb289c442828eeb9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#define DATATYPE int
#define ARRAYLEN 1024*1024*256
#define REP 128
//#define PRINTNEED
#define TIMETESTEVENT
#include <hip/hip_runtime.h>
#include "repeat.h"
// Measure average global-memory load latency via a pointer chase: 128
// serially dependent loads (repeat128 macro) timed with clock().
// time[0] receives cycles per load; out[0] = p keeps the chase live so
// the compiler cannot eliminate it.  Intended for a <<<1,1>>>-style launch.
__global__ void test_global_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;  // 128 dependent loads were timed
out[0] =p;
time[0] = time_tmp;
}
// 1-D int texture read by test_texture_latency.  NOTE(review): it is never
// bound to d_array in call_test_latency below — TODO confirm.
texture <int,1,hipReadModeElementType> texref;
// Texture-path counterpart of test_global_latency: 128 dependent
// tex1Dfetch loads through texref, average cycles written to time[1],
// out[1] = p keeps the chase live.
__global__ void test_texture_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=tex1Dfetch(texref,p);)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;  // 128 dependent fetches were timed
out[1] =p;
time[1] = time_tmp;
}
/*
 * Measure dependent (pointer-chasing) global-memory load latency for one
 * chase stride.
 *   step   - stride between consecutive elements of the chase chain
 *   its    - iteration count forwarded to the kernel (currently unused there)
 *   h_time - host buffer of 6 doubles; slot 0 receives average cycles/load
 * Fix vs. original: every allocation is now released on the early-return
 * error paths too, so repeated calls no longer leak host and device memory.
 */
void call_test_latency(int step,int its,double *h_time)
{
	DATATYPE *h_array;
	h_array=(DATATYPE*)malloc(sizeof(DATATYPE)*ARRAYLEN);
	// Build the chase chain: following h_array repeatedly walks with stride `step`.
	for (int i=0;i<ARRAYLEN;i++)
	{
		h_array[i]=(i+step)%ARRAYLEN;
	}
	DATATYPE *d_array;
	hipMalloc((void**)&d_array,sizeof(DATATYPE)*ARRAYLEN);
	if (hipSuccess != hipMemcpy(d_array,h_array,sizeof(DATATYPE)*ARRAYLEN,hipMemcpyHostToDevice)){
		printf("1\n");
		hipFree(d_array);   // fix: release before bailing out
		free(h_array);
		return;
	}
	/*texture*/
	double *d_time;
	hipMalloc((void**)&d_time,sizeof(double)*6);
	DATATYPE *d_out,*h_out;
	h_out=(DATATYPE *)malloc(sizeof(DATATYPE)*6);
	hipMalloc((void**)&d_out,sizeof(DATATYPE)*6);
	// Single thread: latency must be measured without parallelism.
	hipLaunchKernelGGL(( test_global_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its,d_array);
	if (hipDeviceSynchronize() != hipSuccess){
		printf("3\n");
		hipFree(d_array);   // fix: release before bailing out
		hipFree(d_time);
		hipFree(d_out);
		free(h_array);
		free(h_out);
		return;
	}
	hipMemcpy(h_out,d_out,sizeof(DATATYPE)*6,hipMemcpyDeviceToHost);
	hipMemcpy(h_time,d_time,sizeof(double)*6,hipMemcpyDeviceToHost);
	printf("%d:\t%f\t\n",step,h_time[0]);
	// texref is never bound in this function; kept for parity (benign no-op).
	hipUnbindTexture(texref);
	hipFree(d_array);
	hipFree(d_time);
	hipFree(d_out);
	free(h_array);
	free(h_out);
}
/*
 * Probe global-memory latency for every stride that is a multiple of 1024
 * up to ARRAYLEN.
 * Fix vs. original: h_time was allocated as 6*1024 doubles but indexed with
 * (i-1)*6 for i up to ARRAYLEN (2^28) — a massive heap overflow from the
 * second iteration onward.  We now allocate one 6-double record per probed
 * stride and index by the stride ordinal instead.
 */
int main()
{
	double *h_time;
	const size_t nsteps = ARRAYLEN/1024;	// number of strides probed below
	h_time=(double*)malloc(sizeof(double)*6*nsteps);
	if (h_time==NULL)
	{
		printf("malloc failed\n");
		return 1;
	}
	printf("step\t global\t texture\n");
	for (int i=1024;i<=ARRAYLEN;i+=1024)
	{
		// Record for stride i lives at ordinal (i/1024 - 1).
		call_test_latency(i,1,&h_time[(size_t)(i/1024-1)*6]);
	}
	call_test_latency(1024,1,h_time);
	//printf("average:\t");
	//for (int i=0;i<2;i++)
	//{
	//	double average=0.0;
	//	for (int j=0;j<1024;j++)
	//	{
	//		average+=h_time[j*6+i];
	//	}
	//	average/=1024.0;
	//	printf("%f\t",average);
	//}
	printf("\n");
	free(h_time);
	return 0;
}
| 46aa8a5e3862375a8cab82abcb289c442828eeb9.cu | #include <stdlib.h>
#include <stdio.h>
#define DATATYPE int
#define ARRAYLEN 1024*1024*256
#define REP 128
//#define PRINTNEED
#define TIMETESTEVENT
#include <cuda_runtime.h>
#include "repeat.h"
// Measure average global-memory load latency via a pointer chase: 128
// serially dependent loads (repeat128 macro) timed with clock().
// time[0] receives cycles per load; out[0] = p keeps the chase live so
// the compiler cannot eliminate it.  Intended for a <<<1,1>>> launch.
__global__ void test_global_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;  // 128 dependent loads were timed
out[0] =p;
time[0] = time_tmp;
}
// 1-D int texture read by test_texture_latency.  NOTE(review): it is never
// bound to d_array in call_test_latency below — TODO confirm.
texture <int,1,cudaReadModeElementType> texref;
// Texture-path counterpart of test_global_latency: 128 dependent
// tex1Dfetch loads through texref, average cycles written to time[1],
// out[1] = p keeps the chase live.
__global__ void test_texture_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=tex1Dfetch(texref,p);)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;  // 128 dependent fetches were timed
out[1] =p;
time[1] = time_tmp;
}
/*
 * Measure dependent (pointer-chasing) global-memory load latency for one
 * chase stride.
 *   step   - stride between consecutive elements of the chase chain
 *   its    - iteration count forwarded to the kernel (currently unused there)
 *   h_time - host buffer of 6 doubles; slot 0 receives average cycles/load
 * Fix vs. original: every allocation is now released on the early-return
 * error paths too, so repeated calls no longer leak host and device memory.
 */
void call_test_latency(int step,int its,double *h_time)
{
	DATATYPE *h_array;
	h_array=(DATATYPE*)malloc(sizeof(DATATYPE)*ARRAYLEN);
	// Build the chase chain: following h_array repeatedly walks with stride `step`.
	for (int i=0;i<ARRAYLEN;i++)
	{
		h_array[i]=(i+step)%ARRAYLEN;
	}
	DATATYPE *d_array;
	cudaMalloc((void**)&d_array,sizeof(DATATYPE)*ARRAYLEN);
	if (cudaSuccess != cudaMemcpy(d_array,h_array,sizeof(DATATYPE)*ARRAYLEN,cudaMemcpyHostToDevice)){
		printf("1\n");
		cudaFree(d_array);  // fix: release before bailing out
		free(h_array);
		return;
	}
	/*texture*/
	double *d_time;
	cudaMalloc((void**)&d_time,sizeof(double)*6);
	DATATYPE *d_out,*h_out;
	h_out=(DATATYPE *)malloc(sizeof(DATATYPE)*6);
	cudaMalloc((void**)&d_out,sizeof(DATATYPE)*6);
	// Single thread: latency must be measured without parallelism.
	test_global_latency <<<1,1>>>(d_time,d_out,its,d_array);
	if (cudaDeviceSynchronize() != cudaSuccess){
		printf("3\n");
		cudaFree(d_array);  // fix: release before bailing out
		cudaFree(d_time);
		cudaFree(d_out);
		free(h_array);
		free(h_out);
		return;
	}
	cudaMemcpy(h_out,d_out,sizeof(DATATYPE)*6,cudaMemcpyDeviceToHost);
	cudaMemcpy(h_time,d_time,sizeof(double)*6,cudaMemcpyDeviceToHost);
	printf("%d:\t%f\t\n",step,h_time[0]);
	// texref is never bound in this function; kept for parity (benign no-op).
	cudaUnbindTexture(texref);
	cudaFree(d_array);
	cudaFree(d_time);
	cudaFree(d_out);
	free(h_array);
	free(h_out);
}
/*
 * Probe global-memory latency for every stride that is a multiple of 1024
 * up to ARRAYLEN.
 * Fix vs. original: h_time was allocated as 6*1024 doubles but indexed with
 * (i-1)*6 for i up to ARRAYLEN (2^28) — a massive heap overflow from the
 * second iteration onward.  We now allocate one 6-double record per probed
 * stride and index by the stride ordinal instead.
 */
int main()
{
	double *h_time;
	const size_t nsteps = ARRAYLEN/1024;	// number of strides probed below
	h_time=(double*)malloc(sizeof(double)*6*nsteps);
	if (h_time==NULL)
	{
		printf("malloc failed\n");
		return 1;
	}
	printf("step\t global\t texture\n");
	for (int i=1024;i<=ARRAYLEN;i+=1024)
	{
		// Record for stride i lives at ordinal (i/1024 - 1).
		call_test_latency(i,1,&h_time[(size_t)(i/1024-1)*6]);
	}
	call_test_latency(1024,1,h_time);
	//printf("average:\t");
	//for (int i=0;i<2;i++)
	//{
	//	double average=0.0;
	//	for (int j=0;j<1024;j++)
	//	{
	//		average+=h_time[j*6+i];
	//	}
	//	average/=1024.0;
	//	printf("%f\t",average);
	//}
	printf("\n");
	free(h_time);
	return 0;
}
|
1c5029daf959f6f836cfa121227163cc117ce297.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file roipooling_gpu.cu
// @brief Region of interest pooling block implementation (GPU)
// @author Hakan Bilen
// @author Abishek Dutta
// @author Andrea Vedaldi
/*
Copyright (C) 2016 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "roipooling.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <cfloat>
#include <algorithm>
#include <sm_20_atomic_functions.h>
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
// Pooling geometry passed by value to the kernels: the output grid
// subdivisions and the six coefficients of the affine transform applied
// to ROI corner coordinates.
template<typename T>
struct Geom {
  int subdivisions[2] ;
  T transform[6] ;
  // Copy the host-supplied parameters, converting the transform to T.
  Geom(int const subdivisions[2], double const transform[6])
  {
    for (int i = 0 ; i < 2 ; ++i) { this->subdivisions[i] = subdivisions[i] ; }
    for (int i = 0 ; i < 6 ; ++i) { this->transform[i] = (T)transform[i] ; }
  }
} ;
// Pooling window of one output element: source image index, channel
// offset into the data tensor, and the clipped [hstart,hend) x
// [wstart,wend) pixel range.
struct Bounds {
int image, offset, hstart, hend, wstart, wend ;
bool isEmpty ;  // true when the clipped window has zero area
} ;
// Map a flat output element index to its pooling window.
// Output layout (fastest to slowest): subdivision row, subdivision
// column, channel, ROI.  Each ROI row is 5 values: image index followed
// by two corner coordinates (u1,v1,u2,v2).
template<typename T>
__device__ __forceinline__ static Bounds
getBounds(int outputIndex,
int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
Bounds b ;
// Decompose the flat index into (ph, pw, pc, pr).
int ph = outputIndex ;
int pw = ph / geom.subdivisions[0] ;
int pc = pw / geom.subdivisions[1] ;
int pr = pc / numChannels ;
ph %= geom.subdivisions[0] ;
pw %= geom.subdivisions[1] ;
pc %= numChannels ;
rois += 5 * pr ;
// Apply scale and offset to each ROI coordinate.
T u1_ = rois[1] ;
T v1_ = rois[2] ;
T u2_ = rois[3] ;
T v2_ = rois[4] ;
T u1 = geom.transform[0] * u1_ + geom.transform[2] * v1_ + geom.transform[4] ;
T v1 = geom.transform[1] * u1_ + geom.transform[3] * v1_ + geom.transform[5] ;
T u2 = geom.transform[0] * u2_ + geom.transform[2] * v2_ + geom.transform[4] ;
T v2 = geom.transform[1] * u2_ + geom.transform[3] * v2_ + geom.transform[5] ;
// First and last pixel of each ROI (rounded
// for compatibility with the Caffe definition).
int roi_image = (int)rois[0];
int roi_start_h = (int)round(v1) - 1 ;
int roi_start_w = (int)round(u1) - 1 ;
int roi_end_h = (int)round(v2) - 1 ;
int roi_end_w = (int)round(u2) - 1 ;
int roi_height = max(roi_end_h - roi_start_h + 1, 1) ;
int roi_width = max(roi_end_w - roi_start_w + 1, 1) ;
// Each output bin covers roi_extent / subdivisions input pixels.
T bin_size_h = (T)roi_height / geom.subdivisions[0] ;
T bin_size_w = (T)roi_width / geom.subdivisions[1] ;
roi_image = min(max(roi_image - 1,0), (int)size - 1) ;
b.offset = (roi_image * numChannels + pc) * (width*height) ;
// Bin extents, shifted by the ROI origin and clipped to the image.
b.wstart = (int)floor(((T)pw) * bin_size_w) ;
b.wend = (int)ceil(((T)(pw + 1)) * bin_size_w) ;
b.wstart = min(max(b.wstart + roi_start_w, 0), (int)width) ;
b.wend = min(max(b.wend + roi_start_w, 0), (int)width) ;
b.hstart = (int)floor(((T)ph) * bin_size_h) ;
b.hend = (int)ceil(((T)(ph + 1)) * bin_size_h) ;
b.hstart = min(max(b.hstart + roi_start_h, 0), (int)height) ;
b.hend = min(max(b.hend + roi_start_h, 0), (int)height) ;
b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ;
return b ;
}
/* ---------------------------------------------------------------- */
/* roipooling_average_forward */
/* ---------------------------------------------------------------- */
// Forward average pooling: one thread per output element averages the
// input pixels inside its window.  Empty windows produce 0 (the loops do
// not execute); `coeff` is then ill-defined but unused.
template<typename T> __global__ void
roipooling_average_kernel
(T* output,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;  // jump to this (image, channel) plane
T bestValue = 0;
const T coeff = ((T)1.) / (T)((b.wend-b.wstart) * (b.hend-b.hstart));
for (int w = b.wstart; w < b.wend; ++w) {
for (int h = b.hstart; h < b.hend; ++h) {
int index = w * height + h ;  // column-major plane layout
bestValue += data[index] * coeff ;
}
}
output[outputIndex] = bestValue ;
}
}
/* ---------------------------------------------------------------- */
/* roipooling_max_forward */
/* ---------------------------------------------------------------- */
// Forward max pooling: one thread per output element takes the maximum
// over the input pixels in its window; empty windows yield 0.
template<typename T> __global__ void
roipooling_max_kernel
(T* output,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;  // jump to this (image, channel) plane
if (! b.isEmpty) {
T bestValue = -FLT_MAX;
for (int w = b.wstart; w < b.wend; ++w) {
for (int h = b.hstart; h < b.hend; ++h) {
int index = w * height + h ;  // column-major plane layout
bestValue = max(bestValue, data[index]) ;
}
}
output[outputIndex] = bestValue ;
} else {
output[outputIndex] = 0 ;
}
}
}
/* ---------------------------------------------------------------- */
/* atomicAdd */
/* ---------------------------------------------------------------- */
// atomicAdd(double*) is provided natively from SM60 onward; supply a CAS
// loop fallback only for older device compilation passes.
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// an implementation of atomicAdd() for double (really slow)
static __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
// Classic compare-and-swap retry loop on the 64-bit representation.
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/* ---------------------------------------------------------------- */
/* roipooling_average_backward */
/* ---------------------------------------------------------------- */
// Backward average pooling: spread each output gradient uniformly over
// its window.  Windows from different ROIs overlap, hence atomicAdd.
template<typename T> __global__ void
roipooling_average_backward_kernel
(T* derData,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
const T* derOutput,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
derData += b.offset ;
const T coeff = ((T)1.) / (T)((b.wend-b.wstart)*(b.hend-b.hstart)) ;
for (int h = b.hstart; h < b.hend; ++h) {
for (int w = b.wstart; w < b.wend; ++w) {
int index = w * height + h ;
atomicAdd(derData + index, derOutput[outputIndex] * coeff) ;
}
}
}
}
/* ---------------------------------------------------------------- */
/* roipooling_max_backward */
/* ---------------------------------------------------------------- */
// Backward max pooling: route each output gradient to the argmax pixel of
// its window (recomputed from `data`).  atomicAdd handles overlapping ROIs.
template<typename T> __global__ void
roipooling_max_backward_kernel
(T* derData,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
const T* derOutput,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
if (! b.isEmpty) {
data += b.offset ;
derData += b.offset ;
// Fallback index kept in range even for degenerate windows.
int bestIndex = min(b.wstart,width-1) * height + min(b.hstart,height-1);
T bestValue = -FLT_MAX;
for (int h = b.hstart; h < b.hend; ++h) {
for (int w = b.wstart; w < b.wend; ++w) {
int index = w * height + h ;
T value = data[index] ;
if (value > bestValue) {
bestValue = value ;
bestIndex = index ;
}
}
}
atomicAdd(derData + bestIndex, derOutput[outputIndex]) ;
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
// GPU specialisation of the max ROI-pooling operator: thin host wrappers
// that size the grid (one thread per output element) and launch the
// kernels above, reporting launch errors via hipPeekAtLastError.
template <typename type>
struct roipooling_max<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* output,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
hipLaunchKernelGGL(( roipooling_max_kernel<type>)
, dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)),dim3(VL_CUDA_NUM_THREADS) , 0, 0,
output,
data, height, width, numChannels, size,
rois, numROIs,
Geom<type>(subdivisions,transform)) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
// Backward pass: derData must be pre-zeroed by the caller since the
// kernel only accumulates into it.  NOTE(review): inferred from the
// atomicAdd-only writes — confirm against the caller.
static vl::ErrorCode
backward(type* derData,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
type const* derOutput,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
hipLaunchKernelGGL(( roipooling_max_backward_kernel<type>)
, dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data,
height, width, numChannels, size,
rois, numROIs,
derOutput,
Geom<type>(subdivisions,transform)) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // roipooling_max
template <typename type>
struct roipooling_average<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* output,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
hipLaunchKernelGGL(( roipooling_average_kernel<type>)
, dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)),dim3(VL_CUDA_NUM_THREADS) , 0, 0,
output, data,
height, width, numChannels, size,
rois, numROIs,
Geom<type>(subdivisions,transform)) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data, // <- this is not needed for avg pooling
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
type const* derOutput,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
hipLaunchKernelGGL(( roipooling_average_backward_kernel<type>)
, dim3(divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data,
height, width, numChannels, size,
rois, numROIs,
derOutput,
Geom<type>(subdivisions,transform)) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // roipooling_average
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::roipooling_max<vl::VLDT_GPU, float> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::roipooling_max<vl::VLDT_GPU, double> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, double> ;
#endif
| 1c5029daf959f6f836cfa121227163cc117ce297.cu | // @file roipooling_gpu.cu
// @brief Region of interest pooling block implementation (GPU)
// @author Hakan Bilen
// @author Abishek Dutta
// @author Andrea Vedaldi
/*
Copyright (C) 2016 Hakan Bilen, Abishek Dutta, and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "roipooling.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <cfloat>
#include <algorithm>
#include <sm_20_atomic_functions.h>
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
template<typename T>
struct Geom {
int subdivisions[2] ;
T transform[6] ;
Geom(int const subdivisions[2], double const transform[6])
{
this->subdivisions[0] = subdivisions[0] ;
this->subdivisions[1] = subdivisions[1] ;
this->transform[0] = transform[0] ;
this->transform[1] = transform[1] ;
this->transform[2] = transform[2] ;
this->transform[3] = transform[3] ;
this->transform[4] = transform[4] ;
this->transform[5] = transform[5] ;
}
} ;
struct Bounds {
int image, offset, hstart, hend, wstart, wend ;
bool isEmpty ;
} ;
template<typename T>
__device__ __forceinline__ static Bounds
getBounds(int outputIndex,
int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
Bounds b ;
int ph = outputIndex ;
int pw = ph / geom.subdivisions[0] ;
int pc = pw / geom.subdivisions[1] ;
int pr = pc / numChannels ;
ph %= geom.subdivisions[0] ;
pw %= geom.subdivisions[1] ;
pc %= numChannels ;
rois += 5 * pr ;
// Apply sacle and offset to each ROI coordinate.
T u1_ = rois[1] ;
T v1_ = rois[2] ;
T u2_ = rois[3] ;
T v2_ = rois[4] ;
T u1 = geom.transform[0] * u1_ + geom.transform[2] * v1_ + geom.transform[4] ;
T v1 = geom.transform[1] * u1_ + geom.transform[3] * v1_ + geom.transform[5] ;
T u2 = geom.transform[0] * u2_ + geom.transform[2] * v2_ + geom.transform[4] ;
T v2 = geom.transform[1] * u2_ + geom.transform[3] * v2_ + geom.transform[5] ;
// First and last pixel of each ROI (rounded
// for compatibility with the Caffe definition).
int roi_image = (int)rois[0];
int roi_start_h = (int)round(v1) - 1 ;
int roi_start_w = (int)round(u1) - 1 ;
int roi_end_h = (int)round(v2) - 1 ;
int roi_end_w = (int)round(u2) - 1 ;
int roi_height = max(roi_end_h - roi_start_h + 1, 1) ;
int roi_width = max(roi_end_w - roi_start_w + 1, 1) ;
T bin_size_h = (T)roi_height / geom.subdivisions[0] ;
T bin_size_w = (T)roi_width / geom.subdivisions[1] ;
roi_image = min(max(roi_image - 1,0), (int)size - 1) ;
b.offset = (roi_image * numChannels + pc) * (width*height) ;
b.wstart = (int)floor(((T)pw) * bin_size_w) ;
b.wend = (int)ceil(((T)(pw + 1)) * bin_size_w) ;
b.wstart = min(max(b.wstart + roi_start_w, 0), (int)width) ;
b.wend = min(max(b.wend + roi_start_w, 0), (int)width) ;
b.hstart = (int)floor(((T)ph) * bin_size_h) ;
b.hend = (int)ceil(((T)(ph + 1)) * bin_size_h) ;
b.hstart = min(max(b.hstart + roi_start_h, 0), (int)height) ;
b.hend = min(max(b.hend + roi_start_h, 0), (int)height) ;
b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ;
return b ;
}
/* ---------------------------------------------------------------- */
/* roipooling_average_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_average_kernel
(T* output,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
T bestValue = 0;
const T coeff = ((T)1.) / (T)((b.wend-b.wstart) * (b.hend-b.hstart));
for (int w = b.wstart; w < b.wend; ++w) {
for (int h = b.hstart; h < b.hend; ++h) {
int index = w * height + h ;
bestValue += data[index] * coeff ;
}
}
output[outputIndex] = bestValue ;
}
}
/* ---------------------------------------------------------------- */
/* roipooling_max_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_max_kernel
(T* output,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
if (! b.isEmpty) {
T bestValue = -FLT_MAX;
for (int w = b.wstart; w < b.wend; ++w) {
for (int h = b.hstart; h < b.hend; ++h) {
int index = w * height + h ;
bestValue = max(bestValue, data[index]) ;
}
}
output[outputIndex] = bestValue ;
} else {
output[outputIndex] = 0 ;
}
}
}
/* ---------------------------------------------------------------- */
/* atomicAdd */
/* ---------------------------------------------------------------- */
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// an implementation of atomicAdd() for double (really slow)
static __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/* ---------------------------------------------------------------- */
/* roipooling_average_backward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_average_backward_kernel
(T* derData,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
const T* derOutput,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
derData += b.offset ;
const T coeff = ((T)1.) / (T)((b.wend-b.wstart)*(b.hend-b.hstart)) ;
for (int h = b.hstart; h < b.hend; ++h) {
for (int w = b.wstart; w < b.wend; ++w) {
int index = w * height + h ;
atomicAdd(derData + index, derOutput[outputIndex] * coeff) ;
}
}
}
}
/* ---------------------------------------------------------------- */
/* roipooling_max_backward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_max_backward_kernel
(T* derData,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
const T* derOutput,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
if (! b.isEmpty) {
data += b.offset ;
derData += b.offset ;
int bestIndex = min(b.wstart,width-1) * height + min(b.hstart,height-1);
T bestValue = -FLT_MAX;
for (int h = b.hstart; h < b.hend; ++h) {
for (int w = b.wstart; w < b.wend; ++w) {
int index = w * height + h ;
T value = data[index] ;
if (value > bestValue) {
bestValue = value ;
bestIndex = index ;
}
}
}
atomicAdd(derData + bestIndex, derOutput[outputIndex]) ;
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct roipooling_max<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* output,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_max_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>>
(output,
data, height, width, numChannels, size,
rois, numROIs,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
type const* derOutput,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_max_backward_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data,
height, width, numChannels, size,
rois, numROIs,
derOutput,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // roipooling_max
template <typename type>
struct roipooling_average<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* output,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_average_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>>
(output, data,
height, width, numChannels, size,
rois, numROIs,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data, // <- this is not needed for avg pooling
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
type const* derOutput,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_average_backward_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data,
height, width, numChannels, size,
rois, numROIs,
derOutput,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // roipooling_average
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::roipooling_max<vl::VLDT_GPU, float> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::roipooling_max<vl::VLDT_GPU, double> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, double> ;
#endif
|
c9fbe89b68f2d7e909b74501ef2dec7fd377c3ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, kernels
#include "gauss_eliminate_kernel.cu"
#define MIN_NUMBER 2
#define MAX_NUMBER 50
extern "C" int compute_gold(float*, const float*, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix M);
Matrix allocate_matrix(int num_rows, int num_columns, int init);
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost);
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice);
void gauss_eliminate_on_device(const Matrix M, Matrix P);
int perform_simple_check(const Matrix M);
void print_matrix(const Matrix M);
void write_matrix_to_file(const Matrix M);
float get_random_number(int, int);
void checkCUDAError(const char *msg);
int checkResults(float *reference, float *gpu_result, int num_elements, float threshold);
void FreeDeviceMatrix(Matrix *M);
int
main(int argc, char** argv)
{
// Matrices for the program
Matrix A; // The NxN input matrix
Matrix U; // The upper triangular matrix
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1);
U = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
// Perform Gaussian elimination on the CPU
Matrix reference = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
int status = compute_gold(reference.elements, A.elements, A.num_rows);
if(status == 0){
printf("Failed to convert given matrix to upper triangular. Try again. Exiting. \n");
exit(0);
}
status = perform_simple_check(reference); // Check that the principal diagonal elements are 1
if(status == 0){
printf("The upper triangular matrix is incorrect. Exiting. \n");
exit(0);
}
printf("Gaussian elimination on the CPU was successful. \n");
// Perform the vector-matrix multiplication on the GPU. Return the result in U
gauss_eliminate_on_device(A, U);
// check if the device result is equivalent to the expected solution
int num_elements = MATRIX_SIZE*MATRIX_SIZE;
int res = checkResults(reference.elements, U.elements, num_elements, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
//PRINTING
//print_matrix(reference);
//printf("\n");
//print_matrix(U);
// Free host matrices
free(A.elements); A.elements = NULL;
free(U.elements); U.elements = NULL;
free(reference.elements); reference.elements = NULL;
return 0;
}
// CPU CODE FUNCTION
void
gauss_eliminate_on_device(const Matrix A, Matrix U){
int i, j, k;
Matrix Ud = allocate_matrix_on_gpu(U);
for (i = 0; i < MATRIX_SIZE; i++) {
for (j = 0; j < MATRIX_SIZE; j++)
U.elements[MATRIX_SIZE * i + j] = A.elements[MATRIX_SIZE * i + j];
}
copy_matrix_to_device(Ud, U);
int num_thread_blocks = ceil((float)MATRIX_SIZE/(float)THREAD_BLOCK_SIZE);
dim3 thread_blocks (THREAD_BLOCK_SIZE, 1, 1);
dim3 grid (num_thread_blocks, 1, 1);
struct timeval start, stop;
gettimeofday(&start, NULL);
for (k = 0; k < MATRIX_SIZE; k++){
checkCUDAError("");
hipLaunchKernelGGL(( gauss_eliminate_kernel), dim3(grid), dim3(thread_blocks), 0, 0, Ud.elements, k);
hipDeviceSynchronize();
}
gettimeofday(&stop, NULL);
printf("Parallel Time = %fs. \n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec -
start.tv_usec)/(float)1000000));
copy_matrix_from_device(U, Ud);
FreeDeviceMatrix(&Ud);
}
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M){
Matrix Md = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void**)&Md.elements, size);
return Md;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init){
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%.5f ", M.elements[i*M.num_rows + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
// Performs a simple check on the upper triangular matrix. Checks to see if the principal diagonal elements are 1
int
perform_simple_check(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++)
if((fabs(M.elements[M.num_rows*i + i] - 1.0)) > 0.001) return 0;
return 1;
}
// Writes the matrix to a file
void
write_matrix_to_file(const Matrix M){
FILE *fp;
fp = fopen("matrix.txt", "wt");
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
fprintf(fp, "%f", M.elements[i*M.num_rows + j]);
}
fclose(fp);
}
void
checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
break;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
| c9fbe89b68f2d7e909b74501ef2dec7fd377c3ba.cu | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, kernels
#include "gauss_eliminate_kernel.cu"
#define MIN_NUMBER 2
#define MAX_NUMBER 50
extern "C" int compute_gold(float*, const float*, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix M);
Matrix allocate_matrix(int num_rows, int num_columns, int init);
void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost);
void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice);
void gauss_eliminate_on_device(const Matrix M, Matrix P);
int perform_simple_check(const Matrix M);
void print_matrix(const Matrix M);
void write_matrix_to_file(const Matrix M);
float get_random_number(int, int);
void checkCUDAError(const char *msg);
int checkResults(float *reference, float *gpu_result, int num_elements, float threshold);
void FreeDeviceMatrix(Matrix *M);
int
main(int argc, char** argv)
{
// Matrices for the program
Matrix A; // The NxN input matrix
Matrix U; // The upper triangular matrix
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1);
U = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
// Perform Gaussian elimination on the CPU
Matrix reference = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0);
int status = compute_gold(reference.elements, A.elements, A.num_rows);
if(status == 0){
printf("Failed to convert given matrix to upper triangular. Try again. Exiting. \n");
exit(0);
}
status = perform_simple_check(reference); // Check that the principal diagonal elements are 1
if(status == 0){
printf("The upper triangular matrix is incorrect. Exiting. \n");
exit(0);
}
printf("Gaussian elimination on the CPU was successful. \n");
// Perform the vector-matrix multiplication on the GPU. Return the result in U
gauss_eliminate_on_device(A, U);
// check if the device result is equivalent to the expected solution
int num_elements = MATRIX_SIZE*MATRIX_SIZE;
int res = checkResults(reference.elements, U.elements, num_elements, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
//PRINTING
//print_matrix(reference);
//printf("\n");
//print_matrix(U);
// Free host matrices
free(A.elements); A.elements = NULL;
free(U.elements); U.elements = NULL;
free(reference.elements); reference.elements = NULL;
return 0;
}
// CPU CODE FUNCTION
void
gauss_eliminate_on_device(const Matrix A, Matrix U){
int i, j, k;
Matrix Ud = allocate_matrix_on_gpu(U);
for (i = 0; i < MATRIX_SIZE; i++) {
for (j = 0; j < MATRIX_SIZE; j++)
U.elements[MATRIX_SIZE * i + j] = A.elements[MATRIX_SIZE * i + j];
}
copy_matrix_to_device(Ud, U);
int num_thread_blocks = ceil((float)MATRIX_SIZE/(float)THREAD_BLOCK_SIZE);
dim3 thread_blocks (THREAD_BLOCK_SIZE, 1, 1);
dim3 grid (num_thread_blocks, 1, 1);
struct timeval start, stop;
gettimeofday(&start, NULL);
for (k = 0; k < MATRIX_SIZE; k++){
checkCUDAError("");
gauss_eliminate_kernel<<< grid, thread_blocks>>>(Ud.elements, k);
cudaThreadSynchronize();
}
gettimeofday(&stop, NULL);
printf("Parallel Time = %fs. \n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec -
start.tv_usec)/(float)1000000));
copy_matrix_from_device(U, Ud);
FreeDeviceMatrix(&Ud);
}
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M){
Matrix Md = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void**)&Md.elements, size);
return Md;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init){
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%.5f ", M.elements[i*M.num_rows + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
// Performs a simple check on the upper triangular matrix. Checks to see if the principal diagonal elements are 1
int
perform_simple_check(const Matrix M){
for(unsigned int i = 0; i < M.num_rows; i++)
if((fabs(M.elements[M.num_rows*i + i] - 1.0)) > 0.001) return 0;
return 1;
}
// Writes the matrix to a file
void
write_matrix_to_file(const Matrix M){
FILE *fp;
fp = fopen("matrix.txt", "wt");
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
fprintf(fp, "%f", M.elements[i*M.num_rows + j]);
}
fclose(fp);
}
void
checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
break;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
|
87d68197b27cac9e0bb5e29ad69b65e6cbc1693e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Mandelbrot.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
Mandelbrot::Mandelbrot(uint nMin, uint nMax, const Grid& grid, uint w, uint h, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Mandelbrot_Cuda_RGBA_uchar4", domaineMath), variateurAnimation(Interval<float>(nMin, nMax), 1.f)
{
// Tools
this->t = nMin; // protected dans Animable
}
Mandelbrot::~Mandelbrot()
{
// rien
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car pas zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
t = variateurAnimation.get();
hipLaunchKernelGGL(( mandelbrot) , dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t,domaineMath);
}
/**
* Override
* Call periodicly by the API
*/
void Mandelbrot::animationStep()
{
t = variateurAnimation.varierAndGet();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 87d68197b27cac9e0bb5e29ad69b65e6cbc1693e.cu | #include "Mandelbrot.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
Mandelbrot::Mandelbrot(uint nMin, uint nMax, const Grid& grid, uint w, uint h, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Mandelbrot_Cuda_RGBA_uchar4", domaineMath), variateurAnimation(Interval<float>(nMin, nMax), 1.f)
{
// Tools
this->t = nMin; // protected dans Animable
}
Mandelbrot::~Mandelbrot()
{
// rien
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car pas zoomable
*/
void Mandelbrot::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
t = variateurAnimation.get();
mandelbrot <<<dg,db>>>(ptrDevPixels,w,h,t,domaineMath);
}
/**
* Override
* Call periodicly by the API
*/
void Mandelbrot::animationStep()
{
t = variateurAnimation.varierAndGet();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
d1b826662068a79e5f05f628d26c2349f2f1a689.hip | // !!! This is a file automatically generated by hipify!!!
#include "simpleRayTracer.h"
// grid search
// Walk the uniform acceleration grid along ray r and report the nearest shape
// hit.  Returns true with *t set to the hit distance and *currentShape set to
// the shape index on success; false when the ray leaves the grid unhit.
// A ray starting outside the grid is first clipped onto the grid surface.
__device__ bool gridRayIntersectionSearch(const ray_t r,
    const int Nshapes, const shape_t *shapes, const grid_t grid,
    dfloat *t, int *currentShape){

  int *boxContents = grid.boxContents;  // flattened per-cell shape-id lists
  bbox_t *bboxes = grid.bboxes;         // per-cell bounding boxes
  int *boxStarts = grid.boxStarts;      // CSR-style offsets into boxContents

  // is start of ray in a grid cell ?
  vector_t s = r.start; // will modify ray through s
  vector_t d = r.dir;

  // if ray is outside grid then project onto grid (clip on each face pair)
  if(s.x<grid.xmin){
    if(d.x<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.x-grid.xmin)/d.x;
    s.x = grid.xmin;
    s.y += t0*d.y;
    s.z += t0*d.z;
  }
  if(s.x>grid.xmax){
    if(d.x>=0) return false;
    dfloat t0 = -(s.x-grid.xmax)/d.x;
    s.x = grid.xmax;
    s.y += t0*d.y;
    s.z += t0*d.z;
  }
  if(s.y<grid.ymin){
    if(d.y<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.y-grid.ymin)/d.y;
    s.y = grid.ymin;
    s.x += t0*d.x;
    s.z += t0*d.z;
  }
  if(s.y>grid.ymax){
    if(d.y>=0) return false;
    dfloat t0 = -(s.y-grid.ymax)/d.y;
    s.y = grid.ymax;
    s.x += t0*d.x;
    s.z += t0*d.z;
  }
  if(s.z<grid.zmin){
    if(d.z<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.z-grid.zmin)/d.z;
    s.z = grid.zmin;
    s.x += t0*d.x;
    s.y += t0*d.y;
  }
  if(s.z>grid.zmax){
    if(d.z>=0) return false;
    dfloat t0 = -(s.z-grid.zmax)/d.z;
    s.z = grid.zmax;
    s.x += t0*d.x;
    s.y += t0*d.y;
  }

  // now the ray start must be on the surface of the grid or in a cell
  int cellI = iclamp((s.x-grid.xmin)*grid.invdx,0,grid.NI-1); // assumes grid.NI
  int cellJ = iclamp((s.y-grid.ymin)*grid.invdy,0,grid.NJ-1);
  int cellK = iclamp((s.z-grid.zmin)*grid.invdz,0,grid.NK-1);

  ray_t newr = r;      // clipped copy used only for cell-exit-face tests
  newr.start = s;
  newr.dir = d;

  *currentShape = -1;

  // march cell by cell until a hit inside the current cell is found,
  // or the traversal steps outside the grid
  do{
    int cellID = cellI + grid.NI*cellJ + grid.NI*grid.NJ*cellK;

    *t = 20000; // TW ? (sentinel "far" distance, must exceed any scene hit)
    int start = boxStarts[cellID];
    int end = boxStarts[cellID+1];
    for(int offset=start;offset<end;++offset){
      const int obj = boxContents[offset];
      const shape_t shape = shapes[obj];
      if(intersectRayShape(r, &shape, t)){
        vector_t intersect = vectorAdd(r.start, vectorScale(*t, r.dir));
        // only accept hits that lie inside this cell (a shape may span cells)
        if(intersectPointGridCell(grid, intersect, cellI, cellJ, cellK)){
          *currentShape = obj;
        }
      }
    }
    if(*currentShape != -1){
      return true;
    }

    // find faces that ray passes through and step to the neighbouring cell
    unsigned int face = intersectRayBox(&newr,bboxes[cellID]);
    if(face&1) --cellK; // face 0
    if(face&2) --cellJ; // face 1
    if(face&4) ++cellI; // face 2
    if(face&8) ++cellJ; // face 3
    if(face&16) --cellI;// face 4
    if(face&32) ++cellK;// face 5
    if(face==0){
      break;            // no exit face found: stop traversal
    }
  }while(cellI>=0 && cellI<grid.NI &&
         cellJ>=0 && cellJ<grid.NJ &&
         cellK>=0 && cellK<grid.NK);

  return false;
}
// Iterative (stack-based) ray tracer over the acceleration grid.
// Shading: Lambert diffuse with shadow rays per light, plus reflection and
// refraction rays pushed onto a fixed-size stack (p_maxNrays) instead of
// recursing.  `coef` seeds the first ray's weight; `bg` is used only when the
// primary ray escapes the scene.  `level` is unused (the first ray's level is
// forced to 0) but kept for interface compatibility.
// Fix: "&currentShapeID" had been corrupted into the invalid token
// "¤tShapeID" (mangled '&curren' HTML entity) -- restored.
__device__ colour_t gridTrace(const grid_t grid,
                              const int Nshapes,
                              const shape_t *shapes,
                              const int Nlights,
                              const light_t *lights,
                              const int Nmaterials,
                              const material_t *materials,
                              ray_t r,
                              int level,
                              dfloat coef,
                              colour_t bg){

  colour_t black;
  black.red = 0;
  black.green = 0;
  black.blue = 0;

  // initialize color as black
  colour_t c = black;

  int Nrays = 0, rayID = 0;
  ray_t rayStack[p_maxNrays];

  // add initial ray to stack
  rayID = 0;
  r.level = 0;
  r.coef = coef;
  rayStack[Nrays] = r;
  ++Nrays;

  // keep looping until the stack is exhausted or the maximum number of rays is reached
  while(rayID<Nrays && Nrays<p_maxNrays){

    // get ray
    r = rayStack[rayID];

    // look for intersection of this ray with shapes
    int currentShapeID = -1;
    dfloat t = 20000.f;

    // look through grid to find intersections with ray
    gridRayIntersectionSearch(r, Nshapes, shapes, grid, &t, &currentShapeID);

    // none found
    if(currentShapeID == -1){
      if(rayID==0)
        c = bg;      // primary ray escaped: show background
      // go to next ray
      ++rayID;
      continue;
    }

    // shape at nearest ray intersection
    shape_t currentShape = shapes[currentShapeID];

    // compute intersection location
    vector_t intersection = vectorAdd(r.start, vectorScale(t, r.dir));

    // find unit surface normal
    vector_t n = shapeComputeNormal(intersection, currentShape);

    /* use shadow tracing to determine color contribution from this intersection */
    dfloat rdotn = vectorDot(r.dir, n);

    /* Find the material to determine the colour */
    material_t currentMat = shapeComputeMaterial(Nmaterials, materials, intersection, currentShape);

    // test for reflection
    info_t info = currentMat.info;

    if(info.emitter==1){
      // emissive surface: add its diffuse colour directly, weighted by the ray
      dfloat lambert = rdotn * r.coef;
      c.red += lambert * currentMat.diffuse.red;
      c.green += lambert * currentMat.diffuse.green;
      c.blue += lambert * currentMat.diffuse.blue;
    }
    else{
      if(info.reflector==1){
        dfloat newcoef = r.coef;

        /* start ray slightly off surface */
        dfloat sc = p_shadowDelta;
        if(rdotn>0) // reverse offset if inside
          sc *= -1.f; // sign ? was -1

        vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n)); // HACK to shift ray start off surface

        ray_t lightRay;
        lightRay.start = shadowStart;

        /* Find the value of the light at this point */
        // (loop index changed to int: Nlights is int, avoids
        // signed/unsigned comparison)
        for(int j=0; j < Nlights; j++){
          light_t currentLight = lights[j];
          vector_t dist = vectorSub(currentLight.pos, shadowStart);
          if(vectorDot(n, dist) <= 0) continue;    // light behind surface
          dfloat lightDist = vectorNorm(dist);
          dfloat tshadow = lightDist;
          if(tshadow <= 0) continue;
          lightRay.dir = vectorScale((1.f/tshadow), dist);

          /* search in light ray direction for object */
          int shadowShapeID = -1;
          gridRayIntersectionSearch(lightRay, Nshapes, shapes, grid, &tshadow, &shadowShapeID);

          // check for objects in path of shadow ray
          bool inShadow = false;
          if(shadowShapeID==-1) // no object causes shadow
            inShadow = false;
          else if(tshadow >= 0 && tshadow < lightDist) // occluder between surface and light
            inShadow = true;

          if(inShadow==false){
            /* Lambert diffusion */
            dfloat lambert = vectorDot(lightRay.dir, n) * newcoef;
            c.red += lambert * currentLight.intensity.red * currentMat.diffuse.red;
            c.green += lambert * currentLight.intensity.green * currentMat.diffuse.green;
            c.blue += lambert * currentLight.intensity.blue * currentMat.diffuse.blue;
          }
        }

        // reduce reflected coefficient
        newcoef *= currentMat.reflection;

        if((r.level+1<p_maxLevel) && Nrays<p_maxNrays) {
          ray_t reflectRay;
          // create new ray starting from offset intersection, with ray direction reflected in normal
          reflectRay.start = shadowStart;
          reflectRay.dir = vectorAdd(r.dir, vectorScale(-2.0f*rdotn, n));
          // increment level for new ray
          reflectRay.level = r.level+1;
          reflectRay.coef = newcoef;
          // launch new ray
          rayStack[Nrays] = reflectRay;
          // increment ray counter
          ++Nrays;
        }
      }

      // https://www.scratchapixel.com/code.php?id=13&origin=/lessons/3d-basic-rendering/introduction-to-shading
      // test for refraction
      if(info.refractor==1){
        // can we add a new refraction ray to the stack ?
        if((r.level+1<p_maxLevel) && Nrays<p_maxNrays){
          // push ray onto other side of surface
          dfloat sc = -p_shadowDelta; // reverse number above
          if(rdotn>0)
            sc *= -1;

          // HACK to shift ray start off surface
          vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n));

          // get index of refraction
          dfloat eta = currentMat.eta;
          if(rdotn>0){
            rdotn *= -1;     // inside the object: flip the incidence angle
          }else{
            eta = 1.f/eta;   // entering the object: invert the ratio
          }

          // Snell discriminant; <= 0 means total internal reflection
          dfloat kappa = 1.f - eta*eta*(1.f - rdotn*rdotn);

          if(kappa>0){
            // create new refraction ray
            ray_t refractRay;
            dfloat fac = eta*rdotn-sqrt(kappa);
            refractRay.start = shadowStart;
            refractRay.dir = vectorNormalize(vectorAdd(vectorScale(eta, r.dir), vectorScale(fac, n)));
            refractRay.level = r.level+1;
            refractRay.coef = r.coef; // ?
            rayStack[Nrays] = refractRay;
            ++Nrays;
          }
        }
      }
    }

    // go to next ray on stack
    ++rayID;
  }

  return c;
}
// Exclusive prefix sum: scanv[0] = 0, scanv[k] = v[0] + ... + v[k-1].
// scanv must hold N+1 entries; the return value scanv[N] is the total.
int gridScan(const int N, const int *v, int *scanv){
  int running = 0;
  for(int n = 0; n < N; ++n){
    scanv[n] = running;
    running += v[n];
  }
  scanv[N] = running;
  return running;
}
// Host-side counting pass (named "Kernel" but runs on the CPU): for each
// shape, compute and cache its grid-aligned bounding box in shape->bbox,
// then increment counts[cell] for every grid cell the box overlaps.
// counts must hold at least grid.NI*grid.NJ*grid.NK entries.
// NOTE(review): assumes counts[] starts zeroed (gridPopulate uses calloc)
// -- confirm for any other call sites.
void gridCountShapesInCellsKernel(const grid_t grid, const int Nshapes, shape_t *shapes, int *counts){

  int N = Nshapes;
  for(int n=0;n<N;++n){
    shape_t *shape = shapes+n;
    // cache the shape's bounding box in grid-index space
    shape->bbox = createBoundingBoxShape(grid, *shape);
    const int imin = shape->bbox.imin;
    const int imax = shape->bbox.imax;
    const int jmin = shape->bbox.jmin;
    const int jmax = shape->bbox.jmax;
    const int kmin = shape->bbox.kmin;
    const int kmax = shape->bbox.kmax;
    for(int k=kmin;k<=kmax;++k){
      for(int j=jmin;j<=jmax;++j){
        for(int i=imin;i<=imax;++i){
          int id = i + j*grid.NI + k*grid.NI*grid.NJ;
          ++counts[id];
        }
      }
    }
  }
}
// Host-side scatter pass: append each shape's id to the contents list of
// every grid cell its (previously computed) bounding box overlaps.
// boxCounters[] holds per-cell write cursors, initialized by the caller to
// the CSR start offsets, and is advanced as entries are written.
void gridAddShapesInCellsKernel(const grid_t grid, const int Nshapes, const shape_t *shapes, int *boxCounters, int *boxContents){

  for(int n=0;n<Nshapes;++n){
    const shape_t *shape = shapes+n;
    const int imin = shape->bbox.imin;
    const int imax = shape->bbox.imax;
    const int jmin = shape->bbox.jmin;
    const int jmax = shape->bbox.jmax;
    const int kmin = shape->bbox.kmin;
    const int kmax = shape->bbox.kmax;
    for(int k=kmin;k<=kmax;++k){
      for(int j=jmin;j<=jmax;++j){
        for(int i=imin;i<=imax;++i){
          // box (cell) index
          const int id = i + j*grid.NI + k*grid.NI*grid.NJ;
          // write at this cell's cursor, then advance the cursor
          boxContents[boxCounters[id]] = shape->id;
          ++boxCounters[id];
        }
      }
    }
  }
}
// (Re)build the acceleration grid: count shapes per cell, scan the counts to
// CSR offsets, scatter shape ids into grid->boxContents, then mirror the two
// arrays to the device (c_boxContents / c_boxStarts -- presumably declared in
// simpleRayTracer.h; confirm).
void gridPopulate(grid_t *grid, int Nshapes, shape_t *shapes){

  if(grid->boxContents){
    // host copies from a previous call (allocated with calloc below)
    free(grid->boxContents);
    free(grid->boxStarts);
    //3.B.iii
    // Fix: hipFree was previously applied to the *host* pointers just freed
    // above (invalid + double free); the device mirrors are c_boxContents
    // and c_boxStarts.
    hipFree(c_boxContents);
    hipFree(c_boxStarts);
  }

  // how many cells in grid
  int Nboxes = grid->NI*grid->NJ*grid->NK;

  // count how many objects overlap each cell
  int *boxCounts = (int*) calloc(Nboxes+1, sizeof(int));
  gridCountShapesInCellsKernel (*grid, Nshapes, shapes, boxCounts);

  // make cumulative count
  grid->boxStarts = (int*) calloc(Nboxes+1, sizeof(int));
  int Nentries = gridScan(Nboxes, boxCounts, grid->boxStarts);

  // make a copy of boxStarts to serve as per-cell write cursors
  int *boxCounters = (int*) calloc(Nboxes+1, sizeof(int));
  memcpy(boxCounters, grid->boxStarts, (Nboxes+1)*sizeof(int));

  // accumulate all object indices for each cell
  grid->boxContents = (int*) calloc(Nentries, sizeof(int));

  // add each shape to every box that intersects the shape's bounding box
  gridAddShapesInCellsKernel (*grid, Nshapes, shapes, boxCounters, grid->boxContents);

  // create new device arrays
  // Fixes: copy sources are the grid's host arrays (bare boxContents /
  // boxStarts were undeclared here), and the boxStarts size needs
  // (Nboxes+1) ints -- "sizeof(int)*Nboxes+1" only added one byte.
  hipMalloc(&c_boxContents, sizeof(int)*Nentries);
  hipMemcpy(c_boxContents, grid->boxContents, sizeof(int)*Nentries, hipMemcpyHostToDevice);
  hipMalloc(&c_boxStarts, sizeof(int)*(Nboxes+1));
  hipMemcpy(c_boxStarts, grid->boxStarts, sizeof(int)*(Nboxes+1), hipMemcpyHostToDevice);

  free(boxCounts);
  free(boxCounters);
}
| d1b826662068a79e5f05f628d26c2349f2f1a689.cu | #include "simpleRayTracer.h"
// grid search
// Walk the uniform acceleration grid along ray r and report the nearest shape
// hit.  Returns true with *t set to the hit distance and *currentShape set to
// the shape index on success; false when the ray leaves the grid unhit.
// A ray starting outside the grid is first clipped onto the grid surface.
__device__ bool gridRayIntersectionSearch(const ray_t r,
    const int Nshapes, const shape_t *shapes, const grid_t grid,
    dfloat *t, int *currentShape){

  int *boxContents = grid.boxContents;  // flattened per-cell shape-id lists
  bbox_t *bboxes = grid.bboxes;         // per-cell bounding boxes
  int *boxStarts = grid.boxStarts;      // CSR-style offsets into boxContents

  // is start of ray in a grid cell ?
  vector_t s = r.start; // will modify ray through s
  vector_t d = r.dir;

  // if ray is outside grid then project onto grid (clip on each face pair)
  if(s.x<grid.xmin){
    if(d.x<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.x-grid.xmin)/d.x;
    s.x = grid.xmin;
    s.y += t0*d.y;
    s.z += t0*d.z;
  }
  if(s.x>grid.xmax){
    if(d.x>=0) return false;
    dfloat t0 = -(s.x-grid.xmax)/d.x;
    s.x = grid.xmax;
    s.y += t0*d.y;
    s.z += t0*d.z;
  }
  if(s.y<grid.ymin){
    if(d.y<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.y-grid.ymin)/d.y;
    s.y = grid.ymin;
    s.x += t0*d.x;
    s.z += t0*d.z;
  }
  if(s.y>grid.ymax){
    if(d.y>=0) return false;
    dfloat t0 = -(s.y-grid.ymax)/d.y;
    s.y = grid.ymax;
    s.x += t0*d.x;
    s.z += t0*d.z;
  }
  if(s.z<grid.zmin){
    if(d.z<=0) return false; // pointing away or grazing from grid
    dfloat t0 = -(s.z-grid.zmin)/d.z;
    s.z = grid.zmin;
    s.x += t0*d.x;
    s.y += t0*d.y;
  }
  if(s.z>grid.zmax){
    if(d.z>=0) return false;
    dfloat t0 = -(s.z-grid.zmax)/d.z;
    s.z = grid.zmax;
    s.x += t0*d.x;
    s.y += t0*d.y;
  }

  // now the ray start must be on the surface of the grid or in a cell
  int cellI = iclamp((s.x-grid.xmin)*grid.invdx,0,grid.NI-1); // assumes grid.NI
  int cellJ = iclamp((s.y-grid.ymin)*grid.invdy,0,grid.NJ-1);
  int cellK = iclamp((s.z-grid.zmin)*grid.invdz,0,grid.NK-1);

  ray_t newr = r;      // clipped copy used only for cell-exit-face tests
  newr.start = s;
  newr.dir = d;

  *currentShape = -1;

  // march cell by cell until a hit inside the current cell is found,
  // or the traversal steps outside the grid
  do{
    int cellID = cellI + grid.NI*cellJ + grid.NI*grid.NJ*cellK;

    *t = 20000; // TW ? (sentinel "far" distance, must exceed any scene hit)
    int start = boxStarts[cellID];
    int end = boxStarts[cellID+1];
    for(int offset=start;offset<end;++offset){
      const int obj = boxContents[offset];
      const shape_t shape = shapes[obj];
      if(intersectRayShape(r, &shape, t)){
        vector_t intersect = vectorAdd(r.start, vectorScale(*t, r.dir));
        // only accept hits that lie inside this cell (a shape may span cells)
        if(intersectPointGridCell(grid, intersect, cellI, cellJ, cellK)){
          *currentShape = obj;
        }
      }
    }
    if(*currentShape != -1){
      return true;
    }

    // find faces that ray passes through and step to the neighbouring cell
    unsigned int face = intersectRayBox(&newr,bboxes[cellID]);
    if(face&1) --cellK; // face 0
    if(face&2) --cellJ; // face 1
    if(face&4) ++cellI; // face 2
    if(face&8) ++cellJ; // face 3
    if(face&16) --cellI;// face 4
    if(face&32) ++cellK;// face 5
    if(face==0){
      break;            // no exit face found: stop traversal
    }
  }while(cellI>=0 && cellI<grid.NI &&
         cellJ>=0 && cellJ<grid.NJ &&
         cellK>=0 && cellK<grid.NK);

  return false;
}
// Iterative (stack-based) ray tracer over the acceleration grid.
// Shading: Lambert diffuse with shadow rays per light, plus reflection and
// refraction rays pushed onto a fixed-size stack (p_maxNrays) instead of
// recursing.  `coef` seeds the first ray's weight; `bg` is used only when the
// primary ray escapes the scene.  `level` is unused (the first ray's level is
// forced to 0) but kept for interface compatibility.
// Fix: "&currentShapeID" had been corrupted into the invalid token
// "¤tShapeID" (mangled '&curren' HTML entity) -- restored.
__device__ colour_t gridTrace(const grid_t grid,
                              const int Nshapes,
                              const shape_t *shapes,
                              const int Nlights,
                              const light_t *lights,
                              const int Nmaterials,
                              const material_t *materials,
                              ray_t r,
                              int level,
                              dfloat coef,
                              colour_t bg){

  colour_t black;
  black.red = 0;
  black.green = 0;
  black.blue = 0;

  // initialize color as black
  colour_t c = black;

  int Nrays = 0, rayID = 0;
  ray_t rayStack[p_maxNrays];

  // add initial ray to stack
  rayID = 0;
  r.level = 0;
  r.coef = coef;
  rayStack[Nrays] = r;
  ++Nrays;

  // keep looping until the stack is exhausted or the maximum number of rays is reached
  while(rayID<Nrays && Nrays<p_maxNrays){

    // get ray
    r = rayStack[rayID];

    // look for intersection of this ray with shapes
    int currentShapeID = -1;
    dfloat t = 20000.f;

    // look through grid to find intersections with ray
    gridRayIntersectionSearch(r, Nshapes, shapes, grid, &t, &currentShapeID);

    // none found
    if(currentShapeID == -1){
      if(rayID==0)
        c = bg;      // primary ray escaped: show background
      // go to next ray
      ++rayID;
      continue;
    }

    // shape at nearest ray intersection
    shape_t currentShape = shapes[currentShapeID];

    // compute intersection location
    vector_t intersection = vectorAdd(r.start, vectorScale(t, r.dir));

    // find unit surface normal
    vector_t n = shapeComputeNormal(intersection, currentShape);

    /* use shadow tracing to determine color contribution from this intersection */
    dfloat rdotn = vectorDot(r.dir, n);

    /* Find the material to determine the colour */
    material_t currentMat = shapeComputeMaterial(Nmaterials, materials, intersection, currentShape);

    // test for reflection
    info_t info = currentMat.info;

    if(info.emitter==1){
      // emissive surface: add its diffuse colour directly, weighted by the ray
      dfloat lambert = rdotn * r.coef;
      c.red += lambert * currentMat.diffuse.red;
      c.green += lambert * currentMat.diffuse.green;
      c.blue += lambert * currentMat.diffuse.blue;
    }
    else{
      if(info.reflector==1){
        dfloat newcoef = r.coef;

        /* start ray slightly off surface */
        dfloat sc = p_shadowDelta;
        if(rdotn>0) // reverse offset if inside
          sc *= -1.f; // sign ? was -1

        vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n)); // HACK to shift ray start off surface

        ray_t lightRay;
        lightRay.start = shadowStart;

        /* Find the value of the light at this point */
        // (loop index changed to int: Nlights is int, avoids
        // signed/unsigned comparison)
        for(int j=0; j < Nlights; j++){
          light_t currentLight = lights[j];
          vector_t dist = vectorSub(currentLight.pos, shadowStart);
          if(vectorDot(n, dist) <= 0) continue;    // light behind surface
          dfloat lightDist = vectorNorm(dist);
          dfloat tshadow = lightDist;
          if(tshadow <= 0) continue;
          lightRay.dir = vectorScale((1.f/tshadow), dist);

          /* search in light ray direction for object */
          int shadowShapeID = -1;
          gridRayIntersectionSearch(lightRay, Nshapes, shapes, grid, &tshadow, &shadowShapeID);

          // check for objects in path of shadow ray
          bool inShadow = false;
          if(shadowShapeID==-1) // no object causes shadow
            inShadow = false;
          else if(tshadow >= 0 && tshadow < lightDist) // occluder between surface and light
            inShadow = true;

          if(inShadow==false){
            /* Lambert diffusion */
            dfloat lambert = vectorDot(lightRay.dir, n) * newcoef;
            c.red += lambert * currentLight.intensity.red * currentMat.diffuse.red;
            c.green += lambert * currentLight.intensity.green * currentMat.diffuse.green;
            c.blue += lambert * currentLight.intensity.blue * currentMat.diffuse.blue;
          }
        }

        // reduce reflected coefficient
        newcoef *= currentMat.reflection;

        if((r.level+1<p_maxLevel) && Nrays<p_maxNrays) {
          ray_t reflectRay;
          // create new ray starting from offset intersection, with ray direction reflected in normal
          reflectRay.start = shadowStart;
          reflectRay.dir = vectorAdd(r.dir, vectorScale(-2.0f*rdotn, n));
          // increment level for new ray
          reflectRay.level = r.level+1;
          reflectRay.coef = newcoef;
          // launch new ray
          rayStack[Nrays] = reflectRay;
          // increment ray counter
          ++Nrays;
        }
      }

      // https://www.scratchapixel.com/code.php?id=13&origin=/lessons/3d-basic-rendering/introduction-to-shading
      // test for refraction
      if(info.refractor==1){
        // can we add a new refraction ray to the stack ?
        if((r.level+1<p_maxLevel) && Nrays<p_maxNrays){
          // push ray onto other side of surface
          dfloat sc = -p_shadowDelta; // reverse number above
          if(rdotn>0)
            sc *= -1;

          // HACK to shift ray start off surface
          vector_t shadowStart = vectorAdd(intersection, vectorScale(sc, n));

          // get index of refraction
          dfloat eta = currentMat.eta;
          if(rdotn>0){
            rdotn *= -1;     // inside the object: flip the incidence angle
          }else{
            eta = 1.f/eta;   // entering the object: invert the ratio
          }

          // Snell discriminant; <= 0 means total internal reflection
          dfloat kappa = 1.f - eta*eta*(1.f - rdotn*rdotn);

          if(kappa>0){
            // create new refraction ray
            ray_t refractRay;
            dfloat fac = eta*rdotn-sqrt(kappa);
            refractRay.start = shadowStart;
            refractRay.dir = vectorNormalize(vectorAdd(vectorScale(eta, r.dir), vectorScale(fac, n)));
            refractRay.level = r.level+1;
            refractRay.coef = r.coef; // ?
            rayStack[Nrays] = refractRay;
            ++Nrays;
          }
        }
      }
    }

    // go to next ray on stack
    ++rayID;
  }

  return c;
}
// Exclusive prefix sum: scanv[0] = 0, scanv[k] = v[0] + ... + v[k-1].
// scanv must hold N+1 entries; the return value scanv[N] is the total.
int gridScan(const int N, const int *v, int *scanv){
  int running = 0;
  for(int n = 0; n < N; ++n){
    scanv[n] = running;
    running += v[n];
  }
  scanv[N] = running;
  return running;
}
// Host-side counting pass (named "Kernel" but runs on the CPU): for each
// shape, compute and cache its grid-aligned bounding box in shape->bbox,
// then increment counts[cell] for every grid cell the box overlaps.
// counts must hold at least grid.NI*grid.NJ*grid.NK entries.
// NOTE(review): assumes counts[] starts zeroed (gridPopulate uses calloc)
// -- confirm for any other call sites.
void gridCountShapesInCellsKernel(const grid_t grid, const int Nshapes, shape_t *shapes, int *counts){

  int N = Nshapes;
  for(int n=0;n<N;++n){
    shape_t *shape = shapes+n;
    // cache the shape's bounding box in grid-index space
    shape->bbox = createBoundingBoxShape(grid, *shape);
    const int imin = shape->bbox.imin;
    const int imax = shape->bbox.imax;
    const int jmin = shape->bbox.jmin;
    const int jmax = shape->bbox.jmax;
    const int kmin = shape->bbox.kmin;
    const int kmax = shape->bbox.kmax;
    for(int k=kmin;k<=kmax;++k){
      for(int j=jmin;j<=jmax;++j){
        for(int i=imin;i<=imax;++i){
          int id = i + j*grid.NI + k*grid.NI*grid.NJ;
          ++counts[id];
        }
      }
    }
  }
}
// Host-side scatter pass: append each shape's id to the contents list of
// every grid cell its (previously computed) bounding box overlaps.
// boxCounters[] holds per-cell write cursors, initialized by the caller to
// the CSR start offsets, and is advanced as entries are written.
void gridAddShapesInCellsKernel(const grid_t grid, const int Nshapes, const shape_t *shapes, int *boxCounters, int *boxContents){

  for(int n=0;n<Nshapes;++n){
    const shape_t *shape = shapes+n;
    const int imin = shape->bbox.imin;
    const int imax = shape->bbox.imax;
    const int jmin = shape->bbox.jmin;
    const int jmax = shape->bbox.jmax;
    const int kmin = shape->bbox.kmin;
    const int kmax = shape->bbox.kmax;
    for(int k=kmin;k<=kmax;++k){
      for(int j=jmin;j<=jmax;++j){
        for(int i=imin;i<=imax;++i){
          // box (cell) index
          const int id = i + j*grid.NI + k*grid.NI*grid.NJ;
          // write at this cell's cursor, then advance the cursor
          boxContents[boxCounters[id]] = shape->id;
          ++boxCounters[id];
        }
      }
    }
  }
}
// (Re)build the acceleration grid: count shapes per cell, scan the counts to
// CSR offsets, scatter shape ids into grid->boxContents, then mirror the two
// arrays to the device (c_boxContents / c_boxStarts -- presumably declared in
// simpleRayTracer.h; confirm).
void gridPopulate(grid_t *grid, int Nshapes, shape_t *shapes){

  if(grid->boxContents){
    // host copies from a previous call (allocated with calloc below)
    free(grid->boxContents);
    free(grid->boxStarts);
    //3.B.iii
    // Fix: cudaFree was previously applied to the *host* pointers just freed
    // above (invalid + double free); the device mirrors are c_boxContents
    // and c_boxStarts.
    cudaFree(c_boxContents);
    cudaFree(c_boxStarts);
  }

  // how many cells in grid
  int Nboxes = grid->NI*grid->NJ*grid->NK;

  // count how many objects overlap each cell
  int *boxCounts = (int*) calloc(Nboxes+1, sizeof(int));
  gridCountShapesInCellsKernel (*grid, Nshapes, shapes, boxCounts);

  // make cumulative count
  grid->boxStarts = (int*) calloc(Nboxes+1, sizeof(int));
  int Nentries = gridScan(Nboxes, boxCounts, grid->boxStarts);

  // make a copy of boxStarts to serve as per-cell write cursors
  int *boxCounters = (int*) calloc(Nboxes+1, sizeof(int));
  memcpy(boxCounters, grid->boxStarts, (Nboxes+1)*sizeof(int));

  // accumulate all object indices for each cell
  grid->boxContents = (int*) calloc(Nentries, sizeof(int));

  // add each shape to every box that intersects the shape's bounding box
  gridAddShapesInCellsKernel (*grid, Nshapes, shapes, boxCounters, grid->boxContents);

  // create new device arrays
  // Fixes: copy sources are the grid's host arrays (bare boxContents /
  // boxStarts were undeclared here), and the boxStarts size needs
  // (Nboxes+1) ints -- "sizeof(int)*Nboxes+1" only added one byte.
  cudaMalloc(&c_boxContents, sizeof(int)*Nentries);
  cudaMemcpy(c_boxContents, grid->boxContents, sizeof(int)*Nentries, cudaMemcpyHostToDevice);
  cudaMalloc(&c_boxStarts, sizeof(int)*(Nboxes+1));
  cudaMemcpy(c_boxStarts, grid->boxStarts, sizeof(int)*(Nboxes+1), cudaMemcpyHostToDevice);

  free(boxCounts);
  free(boxCounters);
}
|
f612826ab21b69ea7fa5d1879504c8c18d9e7e5a.hip | // !!! This is a file automatically generated by hipify!!!
// MaskedKlustaKwik2.C
//
// Fast clustering using the CEM algorithm with Masks.
# pragma warning (disable:4819)
#ifndef VERSION
#define VERSION "0.3.0-nogit"
#endif
// Disable some Visual Studio warnings
#define _CRT_SECURE_NO_WARNINGS
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <rocblas.h>
#include <cusolverDn.h>
#include <stdio.h>
#include <iostream>
#include "klustakwik.h"
#include "util.h"
#include<stdlib.h>
#define _USE_MATH_DEFINES
#include<math.h>
#define BLOCKDIM 128
#ifdef _OPENMP
#include<omp.h>
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Log a HIP runtime error (file/line included) and optionally exit.
// Note `abort` defaults to false, so by default failures are only reported.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = false) {
    if (code == hipSuccess)
        return;  // success: nothing to report
    Output("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// GLOBAL VARIABLES
FILE *Distfp;
int global_numiterations = 0;
float iteration_metric2 = (float)0;
float iteration_metric3 = (float)0;
clock_t Clock0;
float timesofar;
//===========================================================================================
template<class T>
inline void resize_and_fill_with_zeros(vector<T> &x, int newsize)
{
    // Leave x holding exactly `newsize` elements, all equal to T(0).
    // (The original branched on whether x was empty, shrinking or growing,
    // but every path ended with a fully zeroed vector of length newsize.)
    if (x.size() == 0)
    {
        // fresh vector: resize alone value-initializes to zeros
        x.resize((unsigned int)newsize);
        return;
    }
    x.assign((unsigned int)newsize, (T)0);
}
//===========================================init d_ones========================================================//
__global__ void init_dones(int nDims, float *d_ones) {
    // Fill the first 2000 entries of d_ones with 1.0 (matches the fixed
    // 2000-float allocation made in KK::AllocateArrays).
    // NOTE(review): nDims is unused here -- presumably kept for interface
    // symmetry; confirm 2000 always covers the required length.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < 2000) d_ones[tid] = 1.0;
}
// Sets storage for KK class. Needs to have nDims and nPoints defined
// beforehand.  All host-side arrays are resized and zero-filled; the large
// block of commented-out hipMalloc calls below documents the device arrays
// that were once allocated here.
// NOTE(review): the d_* device buffers are apparently allocated elsewhere
// now -- confirm before relying on them after this call.
void KK::AllocateArrays() {

    nDims2 = nDims*nDims;
    NoisePoint = 1; // Ensures that the mixture weight for the noise cluster never gets to zero

    // Set sizes for arrays
    resize_and_fill_with_zeros(Data, nPoints * nDims);
    resize_and_fill_with_zeros(Masks, nPoints * nDims);
    resize_and_fill_with_zeros(FloatMasks, nPoints * nDims);
    resize_and_fill_with_zeros(UnMaskDims, nPoints); //SNK Number of unmasked dimensions for each data point when using float masks $\sum m_i$
    resize_and_fill_with_zeros(Weight, MaxPossibleClusters);
    resize_and_fill_with_zeros(Mean, MaxPossibleClusters*nDims);
    resize_and_fill_with_zeros(LogP, MaxPossibleClusters*nPoints);
    resize_and_fill_with_zeros(Class, nPoints);
    resize_and_fill_with_zeros(OldClass, nPoints);
    resize_and_fill_with_zeros(Class2, nPoints);
    resize_and_fill_with_zeros(BestClass, nPoints);
    resize_and_fill_with_zeros(ClassAlive, MaxPossibleClusters);
    resize_and_fill_with_zeros(AliveIndex, MaxPossibleClusters);
    resize_and_fill_with_zeros(ClassPenalty, MaxPossibleClusters);
    resize_and_fill_with_zeros(nClassMembers, MaxPossibleClusters);
    resize_and_fill_with_zeros(CorrectionTerm, nPoints * nDims);
    resize_and_fill_with_zeros(ClusterMask, MaxPossibleClusters*nDims);
    resize_and_fill_with_zeros(Offset, MaxPossibleClusters);

    //==============================GPU Allocate==============================
    //int sizeI = sizeof(int);
    //int sizeF = sizeof(float);
    //gpuErrchk(hipMalloc((void **)&d_Class, nPoints*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_Data, nPoints*nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_Masks, nPoints*nDims*sizeof(int)));
    //gpuErrchk(hipMalloc((void **)&d_nClassMembers, MaxPossibleClusters*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_ClassAlive, MaxPossibleClusters*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_AliveIndex, MaxPossibleClusters*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_NoiseMean, nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_NoiseVariance, nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_CorrectionTerm, nPoints*nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_FloatMasks, nPoints*nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_UnMaskDims, nPoints*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_ClusterMask, MaxPossibleClusters*nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_Mean, MaxPossibleClusters*nDims*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_Weight, MaxPossibleClusters*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_LogP, MaxPossibleClusters*nPoints*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_OldClass, nPoints*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_ClassPenalty, MaxPossibleClusters*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_Class2, nPoints*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_BestClass, nPoints*sizeI));
    ////temp variables
    //gpuErrchk(hipMalloc((void **)&d_ClassAliveTemp, MaxPossibleClusters*sizeI));
    //gpuErrchk(hipMalloc((void **)&d_DeletionLoss, MaxPossibleClusters*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_tempSubtraction, MaxPossibleClusters*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_tempLogP, nPoints*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_tempOldClass, nPoints*sizeF));
    ////MEstep
    //gpuErrchk(hipMalloc((void **)&d_unmaskedSolver, nPoints*sizeF));
    //gpuErrchk(hipMalloc((void **)&d_AllVector2Mean, nPoints*nDims * sizeof(float)));
    //gpuErrchk(hipMalloc((void **)&d_Current, MaxPossibleClusters * nDims * sizeof(int)));
    //gpuErrchk(hipMalloc((void **)&d_PointsInThisClass, nPoints * sizeof(int)));
    //gpuErrchk(hipMalloc((void **)&d_MarkClass, nPoints * sizeof(int)));
    //gpuErrchk(hipMalloc((void **)&d_Offset, MaxPossibleClusters * sizeof(int)));
    ////for loop E step
    //gpuErrchk(hipMalloc((void **)&d_pIndex, nPoints * sizeof(int)));
    //gpuErrchk(hipMalloc((void **)&d_points2Mean, nPoints*nDims * sizeof(float)));
    //gpuErrchk(hipMalloc((void **)&d_InvCovDiag, nDims * sizeof(float)));
    //gpuErrchk(hipMalloc((void **)&d_temp, nDims * sizeof(float)));
    //gpuErrchk(hipMalloc((void **)&d_updatePointsList, nPoints * sizeof(int)));
    //

    // Device vector of ones (length 2000), used as a ones-vector operand
    // (e.g. for BLAS-style reductions); filled by the init_dones kernel.
    gpuErrchk(hipMalloc((void **)&d_ones, 2000 * sizeof(float)));
    init_dones << <(2000 + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >> > (nDims, d_ones);
}
// Penalty for standard CEM
// Penalty(nAlive) returns the complexity penalty for that many clusters
// bearing in mind that cluster 0 has no free params except p.
// Complexity penalty for a model with n clusters.  Cluster 0 (noise)
// contributes no free parameters beyond its weight, hence the (n - 1)
// factor; a one-cluster model therefore pays no penalty at all.
float KK::Penalty(int n)
{
    if (n == 1)
        return 0;
    // Per-cluster free parameters: covariance upper triangle, mean, weight.
    int nParams = (nDims * (nDims + 1) / 2 + nDims + 1) * (n - 1);
    // AIC units (spurious factor of 2 removed from AIC units on 09.07.13)
    float aicTerm = penaltyK * (float)(nParams);
    // BIC units
    float bicTerm = penaltyKLogN * ((float)nParams * (float)log((float)nPoints) / 2);
    return aicTerm + bicTerm;
}
//======================================ComputeClassPenalties================================================
//======================================ComputeClassPenalties================================================
__global__ void c_nnClassMembers(int nPoints, int *d_Class, int *d_nClassMembers) {
    // Histogram pass: one thread per point, atomically tallying how many
    // points each class owns.
    // NOTE(review): assumes d_nClassMembers was zeroed before launch --
    // confirm at call sites.
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    if (tidx < nPoints) {
        atomicAdd(&d_nClassMembers[d_Class[tidx]], 1);
    }
}
__global__ void initClassPenalty(int MaxPossibleClusters, float *d_ClassPenalty) {
    // Zero the per-cluster penalty accumulator (one thread per cluster).
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    if (tidx < MaxPossibleClusters)
        d_ClassPenalty[tidx] = (float)0;
}
__global__ void updateClassPenalty(int nPoints, int *d_Class, float *d_UnMaskDims,float *d_ClassPenalty) {
    // One thread per point: accumulate into the point's class the parameter
    // count implied by its number of unmasked dimensions n
    // (covariance upper triangle n(n+1)/2, mean n, weight 1).
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    if (tidx < nPoints) {
        float n = d_UnMaskDims[tidx];
        atomicAdd(&d_ClassPenalty[d_Class[tidx]], (n*(n + 1) / 2 + n + 1));
    }
}
__global__ void computeClassPenalty(int MaxPossibleClusters, int nPoints,float penaltyK,float penaltyKLogN,
    int *d_nClassMembers, float *d_ClassPenalty) {
    // One thread per cluster: convert the accumulated parameter-count sum
    // into a mean (dividing by the class size), then apply the AIC/BIC
    // weighting -- mirrors the CPU path in KK::ComputeClassPenalties.
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    if (tidx < MaxPossibleClusters) {
        if (d_nClassMembers[tidx]>0)
            d_ClassPenalty[tidx] /= (float)d_nClassMembers[tidx];
        float nParams = d_ClassPenalty[tidx];
        d_ClassPenalty[tidx] = penaltyK*(float)(nParams * 2)
            + penaltyKLogN*((float)nParams*(float)log((float)nPoints) / 2);
    }
}
// Penalties for Masked CEM: per-cluster complexity penalty based on the MEAN
// number of unmasked dimensions of the cluster's members, so heavily masked
// clusters pay for fewer free parameters than the full-dimensional Penalty().
// The CPU and GPU branches compute the same quantity into ClassPenalty /
// d_ClassPenalty respectively.
// NOTE(review): the GPU path reads d_nClassMembers instead of recounting;
// assumes it is up to date when this is called -- confirm at call sites.
void KK::ComputeClassPenalties()
{
    if (useCpu) {
        // Output("ComputeClassPenalties: Correct if UseDistributional only");
        for (int c = 0; c < MaxPossibleClusters; c++)
            ClassPenalty[c] = (float)0;
        // compute sum of nParams for each
        vector<int> NumberInClass(MaxPossibleClusters);
        for (int p = 0; p < nPoints; p++)
        {
            int c = Class[p];
            NumberInClass[c]++;
            //	int n = UnmaskedInd[p+1]-UnmaskedInd[p]; // num unmasked dimensions
            float n = UnMaskDims[p];
            float nParams = n*(n + 1) / 2 + n + 1;
            ClassPenalty[c] += nParams;
        }
        // compute mean nParams for each cluster
        for (int c = 0; c < MaxPossibleClusters; c++)
            if (NumberInClass[c] > 0)
                ClassPenalty[c] /= (float)NumberInClass[c];
        // compute penalty for each cluster
        for (int c = 0; c < MaxPossibleClusters; c++)
        {
            float nParams = ClassPenalty[c];
            ClassPenalty[c] = penaltyK*(float)(nParams * 2)
                + penaltyKLogN*((float)nParams*(float)log((float)nPoints) / 2);
        }
    }
    //=======================================GPU code======================================
    else {
        // same three phases as above, each as one kernel launch
        initClassPenalty << <(MaxPossibleClusters+BLOCKDIM - 1)/ BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, d_ClassPenalty);
        updateClassPenalty << <(nPoints+ BLOCKDIM - 1)/ BLOCKDIM, BLOCKDIM >> > (nPoints,d_Class,d_UnMaskDims,d_ClassPenalty);
        computeClassPenalty << <(MaxPossibleClusters + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, nPoints, penaltyK, penaltyKLogN,
            d_nClassMembers,d_ClassPenalty);
        //Output("d_ClassPenalty.size: %d\n", d_ClassPenalty.size());
        //for (int i = 0; i < d_ClassPenalty.size(); i++) std::cout << d_ClassPenalty[i] << " ";
        //Output("\n");
    }
}
//===========================================CStep==================================================
// One thread per point: save the previous assignment into d_OldClass, then
// scan the alive clusters' log-probabilities (lower is better) to find the
// best and second-best cluster for this point.  When allow_assign_to_noise
// is false the scan starts at cc=1, skipping the noise cluster
// (AliveIndex[0]).
__global__ void d_cstep(int MaxPossibleClusters, int nPoints, bool allow_assign_to_noise, int nClustersAlive, float HugeScore,
    int *d_OldClass, int *d_Class, int *d_Class2, int *d_AliveIndex, float *d_LogP) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < nPoints)
    {
        d_OldClass[tid] = d_Class[tid];
        float BestScore = HugeScore;
        float SecondScore = HugeScore;
        float ThisScore;
        int TopClass = 0;
        int SecondClass = 0;
        int ccstart = 0, c;
        if (!allow_assign_to_noise)
            ccstart = 1;
        for (int cc = ccstart; cc<nClustersAlive; cc++)
        {
            c = d_AliveIndex[cc];
            // LogP is laid out point-major: row tid, column c
            ThisScore = d_LogP[tid*MaxPossibleClusters + c];
            if (ThisScore < BestScore)
            {
                // new best: demote the previous best to second place
                SecondClass = TopClass;
                TopClass = c;
                SecondScore = BestScore;
                BestScore = ThisScore;
            }
            else if (ThisScore < SecondScore)
            {
                SecondClass = c;
                SecondScore = ThisScore;
            }
        }
        d_Class[tid] = TopClass;
        d_Class2[tid] = SecondClass;
    }
}
// Choose best class for each point (and second best) out of those living.
// The CPU branch and the d_cstep kernel implement the same argmin over
// LogP rows; the second-best class is kept for the deletion heuristics.
void KK::CStep(bool allow_assign_to_noise)
{
    if (useCpu) {
        int p, c, cc, TopClass, SecondClass;
        int ccstart = 0;
        if (!allow_assign_to_noise)
            ccstart = 1;  // skip the noise cluster (AliveIndex[0])
        float ThisScore, BestScore, SecondScore;
        for (p = 0; p < nPoints; p++)
        {
            OldClass[p] = Class[p];
            BestScore = HugeScore;
            SecondScore = HugeScore;
            TopClass = SecondClass = 0;
            for (cc = ccstart; cc < nClustersAlive; cc++)
            {
                c = AliveIndex[cc];
                ThisScore = LogP[p*MaxPossibleClusters + c];
                if (ThisScore < BestScore)
                {
                    SecondClass = TopClass;
                    TopClass = c;
                    SecondScore = BestScore;
                    BestScore = ThisScore;
                }
                else if (ThisScore < SecondScore)
                {
                    SecondClass = c;
                    SecondScore = ThisScore;
                }
            }
            Class[p] = TopClass;
            Class2[p] = SecondClass;
        }
    }
    //=====================================GPUcode=======================================
    else {
        d_cstep << <(nPoints + BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, nPoints, allow_assign_to_noise, nClustersAlive, HugeScore,
            d_OldClass,d_Class,d_Class2,d_AliveIndex,d_LogP);
        /*
        Output("d_OldClass.size: %d\n", d_OldClass.size());
        for (int i = 0; i < d_OldClass.size(); i++) std::cout << d_OldClass[i] << "  ";
        Output("\n");
        Output("d_Class.size: %d\n", d_Class.size());
        for (int i = 0; i < d_Class.size(); i++) std::cout << d_Class[i] << "  ";
        Output("\n");
        Output("d_Class2.size: %d\n", d_Class2.size());
        for (int i = 0; i < d_Class2.size(); i++) std::cout << d_Class2[i] << "  ";
        Output("\n");
        */
    }
}
//======================================ConsiderDeletion============================================
__global__ void initDeletionLoss(int MaxPossibleClusters, float HugeScore, int *d_ClassAlive,float *d_DeletionLoss) {
    // One thread per cluster (index 0, the noise cluster, is intentionally
    // skipped here; subtractionLoss pins its score to HugeScore later):
    // alive clusters start at 0, dead ones at HugeScore so they are never
    // picked as deletion candidates.
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    if (tidx > 0 && tidx < MaxPossibleClusters) {
        if (d_ClassAlive[tidx]) d_DeletionLoss[tidx] = 0;
        else d_DeletionLoss[tidx] = HugeScore;
    }
}
// Accumulate, for each point's current cluster, the score increase that
// deleting that cluster would cause: LogP(second-best) - LogP(best).
// One thread per point; per-cluster sums are combined with atomicAdd.
__global__ void computeDeletionLoss(int nPoints, int MaxPossibleClusters, int *d_Class, int *d_Class2, float *d_LogP, float *d_DeletionLoss) {
    const int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    const float *row = d_LogP + p * MaxPossibleClusters;
    atomicAdd(&d_DeletionLoss[d_Class[p]], row[d_Class2[p]] - row[d_Class[p]]);
}
// d_tempSubtraction[c] = DeletionLoss[c] - ClassPenalty[c]: the net change
// in total score from deleting cluster c. Entry 0 (the noise cluster) is
// forced to HugeScore so the argmin that follows can never select it.
__global__ void subtractionLoss(int MaxPossibleClusters, float HugeScore, float *d_ClassPenalty, float *d_DeletionLoss, float *d_tempSubtraction) {
    const int c = blockDim.x * blockIdx.x + threadIdx.x;
    if (c >= MaxPossibleClusters)
        return;
    d_tempSubtraction[c] = (c == 0) ? HugeScore
                                    : d_DeletionLoss[c] - d_ClassPenalty[c];
}
// Delete cluster CandidateClass: each point assigned to it is moved to its
// second-best cluster, and a single thread marks the cluster as dead.
__global__ void reallocatePoints(int nPoints, int CandidateClass, int *d_ClassAlive, int *d_Class, int *d_Class2) {
    const int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    if (p == 0)
        d_ClassAlive[CandidateClass] = 0;   // exactly one writer
    if (d_Class[p] == CandidateClass)
        d_Class[p] = d_Class2[p];
}
// (index, value) pair used by the findMin reduction to track the position
// of the minimum alongside its value.
struct KeyValue {
int id;
float v;
};
// Single-block parallel argmin over d_s[0..n-1].
// Launch as <<<1, B, B*sizeof(KeyValue)>>>; requires n <= 2*B.
// Writes the minimum value and its index to d_result[0].
//
// Fixes over the previous version:
//  - elements d_s[tid] with tid + blockDim.x >= n were silently ignored
//    (for n <= blockDim.x NO element was ever read, so the result was the
//    sentinel value with a meaningless index);
//  - mixdata[tid + blockDim.x].id read shared memory out of bounds (only
//    blockDim.x entries are allocated); the plain index is used instead;
//  - the final warp stage assumed implicit warp synchrony, which is unsafe
//    on Volta and later; a uniform __syncthreads() tree is used throughout.
__global__ void findMin(int n, float *d_s, KeyValue *d_result) {
    extern __shared__ KeyValue mixdata[];
    const int tid = threadIdx.x;

    // Load up to two elements per thread, padding missing slots with a
    // sentinel larger than any expected score so they never win the min.
    mixdata[tid].id = tid;
    mixdata[tid].v = 100000000.0f;
    if (tid < n)
        mixdata[tid].v = d_s[tid];
    if (tid + blockDim.x < n && d_s[tid + blockDim.x] < mixdata[tid].v) {
        mixdata[tid].v = d_s[tid + blockDim.x];
        mixdata[tid].id = tid + blockDim.x;
    }
    __syncthreads();

    // Standard shared-memory tree reduction with a full barrier per step.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s && mixdata[tid + s].v < mixdata[tid].v)
            mixdata[tid] = mixdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        d_result[0] = mixdata[0];
}
// Host wrapper: run the single-block findMin kernel over the device array
// d_s (length n) and return the winning (index, value) pair on the host.
// Uses a fixed 512-thread block with 512 KeyValue entries of dynamic
// shared memory, as findMin expects.
KeyValue findResult(int n, float *d_s) {
    KeyValue h_result;
    KeyValue *d_result = NULL;
    hipMalloc((void **)&d_result, sizeof(KeyValue));
    hipLaunchKernelGGL(findMin, dim3(1), dim3(512), 512 * sizeof(KeyValue), 0,
                       n, d_s, d_result);
    hipMemcpy(&h_result, d_result, sizeof(KeyValue), hipMemcpyDeviceToHost);
    hipFree(d_result);
    return h_result;
}
// Sometimes deleting a cluster will improve the score, when you take into account
// the BIC. This function sees if this is the case. It will not delete more than
// one cluster at a time.
// ConsiderDeletion: delete at most one cluster if doing so improves the
// BIC-adjusted score. For each cluster c, DeletionLoss[c] is the total
// increase in -log P from moving its points to their second-best cluster;
// if DeletionLoss[c] - ClassPenalty[c] < 0 for the best candidate, the
// cluster is deleted and its points reassigned.
// NOTE(review): only the UseDistributional branch selects a candidate on
// the CPU path; the fixed-penalty alternative is absent — confirm intended.
void KK::ConsiderDeletion()
{
if(useCpu){
int c, p, CandidateClass = 0;
float Loss, DeltaPen;
vector<float> DeletionLoss(MaxPossibleClusters); // the increase in log P by deleting the cluster
if (Debug)
Output(" Entering ConsiderDeletion: ");
for (c = 1; c < MaxPossibleClusters; c++){
if (ClassAlive[c]) DeletionLoss[c] = 0;
else DeletionLoss[c] = HugeScore; // don't delete classes that are already there
}
// compute losses by deleting clusters
vector<int> NumberInClass(MaxPossibleClusters);
for (p = 0; p < nPoints; p++){
DeletionLoss[Class[p]] += LogP[p*MaxPossibleClusters + Class2[p]] - LogP[p*MaxPossibleClusters + Class[p]];
int ccc = Class[p];
NumberInClass[ccc]++; // For computing number of points in each class
}
// find class with smallest increase in total score
Loss = HugeScore;
if (UseDistributional) //For UseDistribution, we use the ClusterPenalty
{
for (c = 1; c < MaxPossibleClusters; c++){
if ((DeletionLoss[c] - ClassPenalty[c]) < Loss){
Loss = DeletionLoss[c] - ClassPenalty[c];
CandidateClass = c;
}
}
}// or in the case of fixed penalty find class with least to lose
// what is the change in penalty?
if (UseDistributional) //For the distributional algorithm we need to use the ClusterPenalty
DeltaPen = ClassPenalty[CandidateClass];
//Output("cand Class %d would lose %f gain is %f\n", (int)CandidateClass, Loss, DeltaPen);
// is it worth it?
//06/12/12 fixing bug introduced which considered DeltaPen twice!
if (UseDistributional) //For the distributional algorithm we need to use the ClusterPenalty
{
if (Loss < 0){
Output("Deleting Class %d (%d points): Lose %f but Gain %f\n", (int)CandidateClass, (int)NumberInClass[CandidateClass], DeletionLoss[CandidateClass], DeltaPen);
// set it to dead
ClassAlive[CandidateClass] = 0;
// re-allocate all of its points
for (p = 0; p < nPoints; p++) if (Class[p] == CandidateClass) Class[p] = Class2[p];
// recompute class penalties
ComputeClassPenalties();
}
}
Reindex();
}
//=============================================GPU code=======================================
else {
// Same computation on the device: init, accumulate, subtract penalties,
// then a single-block argmin picks the best deletion candidate.
initDeletionLoss << <(MaxPossibleClusters + BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, HugeScore, d_ClassAlive,d_DeletionLoss);
computeDeletionLoss << <(nPoints + BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (nPoints, MaxPossibleClusters,d_Class,d_Class2,d_LogP,d_DeletionLoss);
//compute minloss and index
subtractionLoss << <MaxPossibleClusters/BLOCKDIM + 1, BLOCKDIM >> > (MaxPossibleClusters, HugeScore,
d_ClassPenalty, d_DeletionLoss, d_tempSubtraction);
KeyValue result = findResult(MaxPossibleClusters, d_tempSubtraction);
//float minLoss = result.v;int CandidateClass = result.id;
if (result.v < 0) {
//Output("Deleting Class %d (%d points): Lose %f but Gain %f\n", (int)CandidateClass, (int)d_NumberInClass[CandidateClass], d_DeletionLoss[CandidateClass], d_ClassPenalty[CandidateClass]);
reallocatePoints << <nPoints/BLOCKDIM + 1, BLOCKDIM >> > (nPoints, result.id,
d_ClassAlive, d_Class, d_Class2);
ComputeClassPenalties();
}
Reindex();
// Mirror the reindexed bookkeeping back to the host copies.
hipMemcpy(&AliveIndex[0], d_AliveIndex, MaxPossibleClusters*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&nClassMembers[0], d_nClassMembers, MaxPossibleClusters * sizeof(int), hipMemcpyDeviceToHost);
/*
Output("this is in the ConsiderDeletion:=======================\n");
Output("d_AliveIndex: %d\n", d_AliveIndex.size());
for (int i = 0;i < d_AliveIndex.size();i++) cout << d_AliveIndex[i] << " ";
Output("\n");
Output("d_ClassAlive: %d\n", d_ClassAlive.size());
for (int i = 0;i < d_ClassAlive.size();i++) cout << d_ClassAlive[i] << " ";
Output("\n");
*/
}
}
// LoadClu(CluFile)
// Loads an initial cluster assignment from a .clu file. The first entry is
// the number of clusters; then one 1-based cluster id per point (stored
// 0-based in Class). Marks the first nStartingClusters classes alive.
// Fixes: the file handle was previously leaked (no fclose) and the first
// fscanf status went unchecked.
void KK::LoadClu(char *CluFile)
{
    FILE *fp;
    int p, c;
    int val;     // read in from %d
    int status;

    fp = fopen_safe(CluFile, "r");
    status = fscanf(fp, "%d", &status == 1 ? nStartingClusters : nStartingClusters);
    status = fscanf(fp, "%d", &nStartingClusters);
    if (status != 1)
        Error("Error reading cluster count from cluster file");
    nClustersAlive = nStartingClusters;// -1;
    for (c = 0; c < MaxPossibleClusters; c++)
        ClassAlive[c] = (c < nStartingClusters);
    for (p = 0; p < nPoints; p++)
    {
        status = fscanf(fp, "%d", &val);
        if (status != 1)                 // EOF or malformed entry
            Error("Error reading cluster file");
        Class[p] = val - 1;              // .clu ids are 1-based
    }
    fclose(fp);                          // previously leaked
}
// for each cluster, try to split it in two. if that improves the score, do it.
// returns 1 if split was successful
// TrySplits: for each alive cluster, fit a 2-cluster sub-model (via K2) to
// its points; if the split sub-model scores better and (unless
// AlwaysSplitBimodal) the split also improves the full-data score evaluated
// in K3, commit the split into an unused cluster slot.
// Returns 1 if any split was committed, 0 otherwise.
int KK::TrySplits()
{
int c, cc, c2, p, p2, DidSplit = 0;
float Score, NewScore, UnsplitScore, SplitScore;
int UnusedCluster;
//KK K2; // second KK structure for sub-clustering
//KK K3; // third one for comparison
if (nClustersAlive >= MaxPossibleClusters - 1)
{
Output("Won't try splitting - already at maximum number of clusters\n");
return 0;
}
// set up K3 and remember to add the masks
//KK K3(*this);
if (!AlwaysSplitBimodal)
{
if (KK_split == NULL)
{
KK_split = new KK(*this);
}
else
{
// We have to clear these to bypass the debugging checks
// in precomputations.cpp
KK_split->Unmasked.clear();
KK_split->UnmaskedInd.clear();
KK_split->SortedMaskChange.clear();
KK_split->SortedIndices.clear();
// now we treat it as empty
KK_split->ConstructFrom(*this);
}
}
//KK &K3 = *KK_split;
#define K3 (*KK_split)
Output("Compute initial score before splitting: ");
Score = ComputeScore();
// loop the clusters, trying to split
for (cc = 1; cc<nClustersAlive; cc++)
{
c = AliveIndex[cc];
// set up K2 structure to contain points of this cluster only
vector<int> SubsetIndices;
for (p = 0; p<nPoints; p++)
if (Class[p] == c)
SubsetIndices.push_back(p);
if (SubsetIndices.size() == 0)
continue;
if (K2_container)
{
// We have to clear these to bypass the debugging checks
// in precomputations.cpp
K2_container->Unmasked.clear();
K2_container->UnmaskedInd.clear();
K2_container->SortedMaskChange.clear();
K2_container->SortedIndices.clear();
//K2_container->AllVector2Mean.clear();
// now we treat it as empty
K2_container->ConstructFrom(*this, SubsetIndices);
}
else
{
K2_container = new KK(*this, SubsetIndices);
}
//KK K2(*this, SubsetIndices);
KK &K2 = *K2_container;
// find an unused cluster
UnusedCluster = -1;
for (c2 = 1; c2<MaxPossibleClusters; c2++){
if (!ClassAlive[c2]){
UnusedCluster = c2;
break;
}
}
if (UnusedCluster == -1)
{
Output("No free clusters, abandoning split");
return DidSplit;
}
// do it
if (Verbose >= 1) Output("\n Trying to split cluster %d (%d points) \n", (int)c, (int)K2.nPoints);
K2.nStartingClusters = 2; // (2 = 1 clusters + 1 unused noise cluster)
UnsplitScore = K2.CEM(NULL, 0, 1, false);
K2.nStartingClusters = 3; // (3 = 2 clusters + 1 unused noise cluster)
SplitScore = K2.CEM(NULL, 0, 1, false);
// Fix by Michal Zugaro: replace next line with following two lines
// if(SplitScore<UnsplitScore) {
if (K2.nClustersAlive<2) Output("\n Split failed - leaving alone\n");
if ((SplitScore<UnsplitScore) && (K2.nClustersAlive >= 2)) {
if (AlwaysSplitBimodal)
{
DidSplit = 1;
Output("\n We are always splitting bimodal clusters so it's getting split into cluster %d.\n", (int)UnusedCluster);
// p2 walks K2's point index in lock-step with points of cluster c.
p2 = 0;
for (p = 0; p < nPoints; p++)
{
if (Class[p] == c)
{
if (K2.Class[p2] == 1) Class[p] = c;
else if (K2.Class[p2] == 2) Class[p] = UnusedCluster;
else Error("split should only produce 2 clusters\n");
p2++;
}
ClassAlive[Class[p]] = 1;
}
}
else
{
// will splitting improve the score in the whole data set?
// assign clusters to K3
for (c2 = 0; c2 < MaxPossibleClusters; c2++) K3.ClassAlive[c2] = 0;
// Output("%d Points in class %d in KKobject K3 ", (int)c2, (int)K3.nClassMembers[c2]);
p2 = 0;
for (p = 0; p < nPoints; p++)
{
if (Class[p] == c)
{
if (K2.Class[p2] == 1) K3.Class[p] = c;
else if (K2.Class[p2] == 2) K3.Class[p] = UnusedCluster;
else Error("split should only produce 2 clusters\n");
p2++;
}
else K3.Class[p] = Class[p];
K3.ClassAlive[K3.Class[p]] = 1;
}
K3.Reindex();
// compute scores
K3.MEstep();
//K3.MStep();
//K3.EStep();
//Output("About to compute K3 class penalties");
if (UseDistributional) K3.ComputeClassPenalties(); //SNK Fixed bug: Need to compute the cluster penalty properly, cluster penalty is only used in UseDistributional mode
NewScore = K3.ComputeScore();
Output("\nSplitting cluster %d changes total score from %f to %f\n", (int)c, Score, NewScore);
if (NewScore < Score)
{
DidSplit = 1;
Output("\n So it's getting split into cluster %d.\n", (int)UnusedCluster);
// so put clusters from K3 back into main KK struct (K1)
for (c2 = 0; c2 < MaxPossibleClusters; c2++) ClassAlive[c2] = K3.ClassAlive[c2];
for (p = 0; p < nPoints; p++) Class[p] = K3.Class[p];
}
else
{
Output("\n So it's not getting split.\n");
}
}
}
}
return DidSplit;
#undef K3
}
//=========================================ComputeScore==============================================
// Gather each point's own-class log probability into a contiguous array
// (d_tempLogP[p] = LogP[p][Class[p]]) so the host can sum it with one
// cublas/hipblas Sasum call.
__global__ void copyLogP(int nPoints, int MaxPossibleClusters, int *d_Class, float *d_LogP, float *d_tempLogP) {
    const int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    d_tempLogP[p] = d_LogP[p * MaxPossibleClusters + d_Class[p]];
}
// ComputeScore() - computes total score. Requires M, E, and C steps to have been run
// ComputeScore() - computes total score = sum over points of the LogP of
// their assigned class, plus the penalty term (per-cluster ClassPenalty in
// distributional mode, else Penalty(nClustersAlive)).
// Requires M, E, and C steps to have been run.
float KK::ComputeScore()
{
if (useCpu) {
int p;
// int debugadd;
float penalty = (float)0;
if (UseDistributional) // For distributional algorithm we require the cluster penalty
for (int c = 0; c < MaxPossibleClusters; c++)
penalty += ClassPenalty[c];
else
penalty = Penalty(nClustersAlive);
float Score = penalty;
for (p = 0; p < nPoints; p++)
{ //debugadd = LogP[p*MaxPossibleClusters + Class[p]];
Score += LogP[p*MaxPossibleClusters + Class[p]];
// Output("point %d: cumulative score %f adding%f\n", (int)p, Score, debugadd);
}
//Error("Score: %f Penalty: %f\n", Score, penalty);
Output(" Score: Raw %f + Penalty %f = %f\n", Score - penalty, penalty, Score);
if (Debug) {
int c, cc;
float tScore;
for (cc = 0; cc < nClustersAlive; cc++) {
c = AliveIndex[cc];
tScore = 0;
for (p = 0; p < nPoints; p++) if (Class[p] == c) tScore += LogP[p*MaxPossibleClusters + Class[p]];
Output("class %d has subscore %f\n", c, tScore);
}
}
return Score;
}
//====================================GPU code=========================
else {
// NOTE(review): Sasum sums ABSOLUTE values; the negation below (and the
// penalty sum) is only equivalent to the CPU path if every LogP entry
// and ClassPenalty entry is non-negative — confirm that invariant.
// NOTE(review): the GPU path always uses per-cluster penalties, i.e. it
// assumes UseDistributional — confirm the fixed-penalty mode is CPU-only.
float penalty;// = reduceFlt<128>(d_ClassPenalty, MaxPossibleClusters);
hipblasSasum(handle, MaxPossibleClusters, d_ClassPenalty, 1, &penalty);
copyLogP << <(nPoints+BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (nPoints, MaxPossibleClusters, d_Class,
d_LogP,d_tempLogP);
float Score;
hipblasSasum(handle, nPoints, d_tempLogP, 1, &Score);
Score = -Score;
//float Score = reduceFlt<128>(d_tempLogP, nPoints);
Output(" Score: Raw %f + Penalty %f = %f\n", Score, penalty, Score + penalty);
return Score + penalty;
}
}
// Initialise starting conditions by selecting unique masks at random
// Initialise starting conditions by selecting unique masks at random:
// every distinct float mask becomes a potential seed cluster; points whose
// mask was not selected are assigned to the cluster whose seed mask is
// closest in Hamming distance over the binary masks. Finally the chosen
// assignment is pushed to the device (d_Class / d_ClassAlive).
void KK::StartingConditionsFromMasks()
{
int nClusters2start = 0; //SNK To replace nStartingClusters within this variable only
//if (Debug)
// Output("StartingConditionsFromMasks: ");
Output("Starting initial clusters from distinct float masks \n ");
if (nStartingClusters <= 1) // If only 1 starting clutser has been requested, assign all the points to cluster 0
{
for (int p = 0; p<nPoints; p++)
Class[p] = 0;
}
else
{
// Count distinct masks: SortedMaskChange flags the first point of each
// run of identical masks in sorted order.
int num_masks = 0;
for (int p = 0; p<nPoints; p++)
num_masks += (int)SortedMaskChange[p];
if ((nStartingClusters - 1)>num_masks)
{
Error("Not enough masks (%d) to generate starting clusters (%d), "
"so starting with (%d) clusters instead.\n", (int)num_masks,
(int)nStartingClusters, (int)(num_masks + 1));
nClusters2start = num_masks + 1;
//return;
}
else
{
nClusters2start = nStartingClusters;
}
// Construct the set of all masks
vector<bool> MaskUsed;
vector<int> MaskIndex(nPoints);
vector<int> MaskPointIndex;
int current_mask_index = -1;
for (int q = 0; q<nPoints; q++)
{
int p = SortedIndices[q];
if (q == 0 || SortedMaskChange[p])
{
current_mask_index++;
MaskUsed.push_back(false);
MaskPointIndex.push_back(p);
}
MaskIndex[p] = current_mask_index;
}
// Select points at random until we have enough masks
int masks_found = 0;
vector<int> MaskIndexToUse;
vector<int> FoundMaskIndex(num_masks);
while (masks_found<nClusters2start - 1)
{
int p = irand(0, nPoints - 1);
int mask_index = MaskIndex[p];
if (!MaskUsed[mask_index])
{
MaskIndexToUse.push_back(mask_index);
MaskUsed[mask_index] = true;
FoundMaskIndex[mask_index] = masks_found;
masks_found++;
}
}
// Assign points to clusters based on masks
for (int p = 0; p<nPoints; p++)
{
if (MaskUsed[MaskIndex[p]]) // we included this points mask
Class[p] = FoundMaskIndex[MaskIndex[p]] + 1; // so assign class to mask index
else // this points mask not included
{
// so find closest match
int closest_index = 0;
int distance = nDims + 1;
vector<int> possibilities;
for (int mi = 0; mi<nClusters2start - 1; mi++)
{
int mip = MaskPointIndex[MaskIndexToUse[mi]];
// compute mask distance (Hamming distance over binary masks)
int curdistance = 0;
for (int i = 0; i<nDims; i++)
if (GetMasks(p*nDims + i) != GetMasks(mip*nDims + i))
curdistance++;
if (curdistance<distance)
{
possibilities.clear();
distance = curdistance;
}
if (curdistance == distance)
possibilities.push_back(mi);
}
if ((MaskStarts > 0) || AssignToFirstClosestMask)
closest_index = possibilities[0];
else
closest_index = possibilities[irand(0, possibilities.size() - 1)];
Class[p] = closest_index + 1;
}
}
// print some info
Output("Assigned %d initial classes from %d unique masks.\n",
(int)nClusters2start, (int)num_masks);
// Dump initial random classes to a file - knowledge of maskstart configuration may be useful
// TODO: remove this for final version - SNK: actually it is a nice idea to keep this
char fname[STRLEN];
FILE *fp;
sprintf(fname, "%s.initialclusters.%d.clu.%d", FileBase, (int)nClusters2start, (int)ElecNo);
fp = fopen_safe(fname, "w");
fprintf(fp, "%d\n", (int)nClusters2start);
for (int p = 0; p<nPoints; p++)
fprintf(fp, "%d\n", (int)Class[p]);
fclose(fp);
}
for (int c = 0; c<MaxPossibleClusters; c++)
ClassAlive[c] = (c<nClusters2start);
// Push the chosen starting assignment to the device.
hipMemcpy(d_Class, &Class[0], nPoints*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_ClassAlive, &ClassAlive[0], MaxPossibleClusters* sizeof(int), hipMemcpyHostToDevice);
}
//======================================CEM step======================================================
// Mark points whose class changed this iteration: d_tempOldClass[p] is 1.0
// where Class differs from OldClass, else 0.0. The host then counts the
// changes by summing this array with a Sasum call.
__global__ void updataTempOldClass(int nPoints, int *d_Class, int *d_OldClass, float *d_tempOldClass) {
    const int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    d_tempOldClass[p] = (d_Class[p] != d_OldClass[p]) ? 1.0f : 0.0f;
}
// CEM(StartFile) - Does a whole CEM algorithm from a random start or masked start
// whereby clusters are assigned according to the similarity of their masks
// optional start file loads this cluster file to start iteration
// if Recurse is 0, it will not try and split.
// if InitRand is 0, use cluster assignments already in structure
// CEM - runs the full Classification-EM loop: optional init (clu file or
// random/mask start), then repeated ME-step / C-step / penalty /
// ConsiderDeletion until no point changes class after a full step.
// Returns the final score.
float KK::CEM(char *CluFile, int Recurse, int InitRand,
bool allow_assign_to_noise)
{
int p;
int nChanged;
int Iter;
//vector<int> OldClass(nPoints);
//thrust::device_vector<int> d_OldClass(nPoints);
float Score, OldScore;
int LastStepFull; // stores whether the last step was a full one
int DidSplit = 0;
if (Debug)
{
Output("Entering CEM \n");
}
// NOTE(review): clock() returns clock_t; storing it in int may truncate
// on long runs — confirm acceptable for these timing diagnostics.
int time1 = clock();
if (CluFile && *CluFile)
LoadClu(CluFile);
else if (InitRand)
{
// initialize data to random
if ((MaskStarts || UseMaskedInitialConditions) && (UseDistributional) && Recurse)
StartingConditionsFromMasks();//2.5s
}
CopyHostToDevice();
float preComputeTime = (clock() - time1) / (float)CLOCKS_PER_SEC;
Output("==========================preComputeTime is : %f\n", preComputeTime);
//int sizeI = sizeof(int);
//int sizeF = sizeof(float);
//gpuErrchk(hipMemcpy(d_nClassMembers, &nClassMembers[0], MaxPossibleClusters*sizeI, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_AliveIndex, &AliveIndex[0], MaxPossibleClusters*sizeI, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_ClusterMask, &ClusterMask[0], MaxPossibleClusters*nDims*sizeF, hipMemcpyHostToDevice));
//gpuErrchk(hipMemcpy(d_Mean, &Mean[0], MaxPossibleClusters*nDims*sizeF, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_Weight, &Weight[0], MaxPossibleClusters*sizeF, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_LogP, &LogP[0], MaxPossibleClusters*nPoints*sizeF, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_OldClass, &OldClass[0], nPoints*sizeI, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_ClassPenalty, &ClassPenalty[0], MaxPossibleClusters*sizeF, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_Class2, &Class2[0], nPoints*sizeI, hipMemcpyHostToDevice));//==
//gpuErrchk(hipMemcpy(d_BestClass, &BestClass[0], nPoints*sizeI, hipMemcpyHostToDevice));//==
// set all classes to alive
Reindex();
// main loop
Iter = 0;
FullStep = 1;
Score = 0.0;
do {
Output("this is %d cycle...\n", Iter);
//========================
// Snapshot current assignment so we can count changes afterwards.
if (useCpu) for (p = 0; p < nPoints; p++) OldClass[p] = Class[p];
else
hipMemcpy(d_OldClass, d_Class, nPoints*sizeof(int), hipMemcpyDeviceToDevice);
//========================
// M-step - calculate class weights, means, and covariance matrices for each class
// E-step - calculate scores for each point to belong to each class
int time3 = clock();
MEstep();
Output("==========================MEstep : %f\n", (clock() - time3) / (float)CLOCKS_PER_SEC);
int time4 = clock();
// C-step - choose best class for each
CStep(allow_assign_to_noise);
// Compute class penalties
ComputeClassPenalties();
// Would deleting any classes improve things?
if (Recurse) ConsiderDeletion();
//================================
// Calculate number changed
nChanged = 0;
if(useCpu) for (p = 0; p < nPoints; p++) nChanged += (OldClass[p] != Class[p]);
else {
updataTempOldClass << < (nPoints+BLOCKDIM - 1)/ BLOCKDIM,BLOCKDIM>> > (nPoints,d_Class,d_OldClass,d_tempOldClass);
float nchanged;
hipblasSasum(handle, nPoints,d_tempOldClass, 1, &nchanged);
nChanged = (int)nchanged;//reduceInt<128>(d_tempOldClass, nPoints);
}
//===============================
//Compute elapsed time
timesofar = (clock() - Clock0) / (float)CLOCKS_PER_SEC;
//Output("\nTime so far%f seconds.\n", timesofar);
//Write start of Output to klg file
if (Verbose >= 1)
{
if (Recurse == 0) Output("\t\tSP:");
if ((Recurse != 0) || (SplitInfo == 1 && Recurse == 0))
Output("Iteration %d%c (%f sec): %d clusters\n",
(int)Iter, FullStep ? 'F' : 'Q', timesofar, (int)nClustersAlive);
}
// Calculate score
OldScore = Score;
Score = ComputeScore();
Output("==========================other step : %f\n", (clock() - time4) / (float)CLOCKS_PER_SEC);
int time5 = clock();
//Finish Output to klg file with Score already returned via the ComputeScore() function
if (Verbose >= 1)
{
Output(" nChanged %d\n", (int)nChanged);
}
//if(Verbose>=1)
//{
// if(Recurse==0) Output("\t");
// Output(" Iteration %d%c: %d clusters Score %.7g nChanged %d\n",
// (int)Iter, FullStep ? 'F' : 'Q', (int)nClustersAlive, Score, (int)nChanged);
//}
Iter++;
numiterations++;
global_numiterations++;
iteration_metric2 += (float)(nDims*nDims)*(float)(nPoints);
iteration_metric3 += (float)(nDims*nDims)*(float)(nDims*nPoints);
//if (Debug)
//{
// for (p = 0; p<nPoints; p++) BestClass[p] = Class[p];
// SaveOutput();
// Output("Press return");
// getchar();
//}
// Next step a full step?
LastStepFull = FullStep;
FullStep = (
nChanged > ChangedThresh*nPoints
|| nChanged == 0
|| Iter%FullStepEvery == 0
|| Score > OldScore // SNK: Resurrected
//SNK Score decreases ARE because of quick steps!
);
if (Iter > MaxIter)
{
Output("Maximum iterations exceeded\n");
break;
}
//Save a temporary clu file when not splitting
if ((SaveTempCluEveryIter && Recurse) && (OldScore > Score))
{
//SaveTempOutput(); //SNK Saves a temporary Output clu file on each iteration
Output("Writing temp clu file \n");
Output("Because OldScore, %f, is greater than current (better) Score,%f \n ", OldScore, Score);
}
// try splitting
// NOTE(review): TrySplits() is commented out below, so DidSplit stays 0
// and never affects the loop condition — confirm intentional.
//int mod = (abs(Iter-SplitFirst))%SplitEvery;
//Output("\n Iter mod SplitEvery = %d\n",(int)mod);
//Output("Iter-SplitFirst %d \n",(int)(Iter-SplitFirst));
//if ((Recurse && SplitEvery > 0) && (Iter == SplitFirst || (Iter >= SplitFirst + 1 && (Iter - SplitFirst) % SplitEvery == SplitEvery - 1) || (nChanged == 0 && LastStepFull)))
//{
// if (OldScore > Score) //This should be trivially true for the first run of KlustaKwik
// {
// //SaveTempOutput(); //SNK Saves a temporary Output clu file before each split
// Output("Writing temp clu file \n");
// Output("Because OldScore, %f, is greater than current (better) Score,%f \n ", OldScore, Score);
// }
// DidSplit = TrySplits();
//}
//else DidSplit = 0;
//Output("==========================trysplit : %f\n", (clock() - time5) / (float)CLOCKS_PER_SEC);
} while (nChanged > 0 || !LastStepFull || DidSplit);
if (DistDump) fprintf(Distfp, "\n");
return Score;
}
// does the two-step clustering algorithm:
// first make a subset of the data, to SubPoints points
// then run CEM on this
// then use these clusters to do a CEM on the full data
// It calls CEM whenever there is no initialization clu file (i.e. the most common usage)
// Cluster: entry point for the (optionally two-step) clustering algorithm.
// With Subset <= 1 it simply runs CEM on the full data set. The subsetting
// path is currently disabled (commented out below); previously control fell
// off the end of this value-returning function for Subset > 1, which is
// undefined behaviour — we now fall back to full-data CEM instead.
// NOTE: StartCluFile is currently unused on the active path.
float KK::Cluster(char *StartCluFile = NULL)
{
    if (Debug)
    {
        Output("Entering Cluster \n");
    }
    if (Subset <= 1)
    { // don't subset
        Output("------ Clustering full data set of %d points ------\n", (int)nPoints);
        return CEM(NULL, 1, 1);
    }
    // Subset > 1 requested but the subset implementation is disabled:
    // cluster the full data set so we always return a defined value.
    Output("------ Subsetting disabled: clustering full data set of %d points ------\n", (int)nPoints);
    return CEM(NULL, 1, 1);
    //// otherwise run on a subset of points
    //int sPoints = nPoints / Subset; // number of subset points - int division will round down
    //vector<int> SubsetIndices(sPoints);
    //for (int i = 0; i<sPoints; i++)
    // // choose point to include, evenly spaced plus a random offset
    // SubsetIndices[i] = Subset*i + irand(0, Subset - 1);
    //KK KKSub = KK(*this, SubsetIndices);
    //// run CEM algorithm on KKSub
    //Output("------ Running on subset of %d points ------\n", (int)sPoints);
    //KKSub.CEM(NULL, 1, 1);
    //// now copy cluster shapes from KKSub to main KK
    //Weight = KKSub.Weight;
    //Mean = KKSub.Mean;
    //Cov = KKSub.Cov;
    //DynamicCov = KKSub.DynamicCov;
    //ClassAlive = KKSub.ClassAlive;
    //nClustersAlive = KKSub.nClustersAlive;
    //AliveIndex = KKSub.AliveIndex;
    //// Run E and C steps on full data set
    //Output("------ Evaluating fit on full set of %d points ------\n", (int)nPoints);
    //if (UseDistributional)
    // ComputeClusterMasks(); // needed by E-step normally computed by M-step
    ////EStep();
    //CStep();
    //// compute score on full data set and leave
    //return ComputeScore();
}
// Initialise by loading data from files
// Primary constructor: creates the BLAS/solver handles, loads the feature
// data for the given electrode, and runs the initial precomputations.
// penaltyK / penaltyKLogN / priorPoint are per-object copies of the global
// parameters of the same (capitalised) names.
KK::KK(char *FileBase, int ElecNo, char *UseFeatures,
float PenaltyK, float PenaltyKLogN, int PriorPoint)
{
// Library handles are created once here and shared with child KK objects
// (see ConstructFrom), which is why the destructor does not destroy them.
hipblasCreate(&handle);
hipsolverDnCreate(&solver_handle);
side = HIPBLAS_SIDE_LEFT;
uplo = HIPBLAS_FILL_MODE_UPPER;
trans = HIPBLAS_OP_T;
diag = HIPBLAS_DIAG_NON_UNIT;
KK_split = NULL;
K2_container = NULL;
penaltyK = PenaltyK;
penaltyKLogN = PenaltyKLogN;
LoadData(FileBase, ElecNo, UseFeatures);
priorPoint = PriorPoint;
//NOTE: penaltyK, penaltyKlogN, priorPoint, lower case versions of global variable PenaltyK PenaltyKLogN and PriorPoint
DoInitialPrecomputations();//Now DoPrecomputations is only invoked in the initialization
numiterations = 0;
init_type = 0;   // 0 = constructed from data files
}
// This function is used by both of the constructors below, it initialises
// the data from a source KK object with a subset of the indices.
//used trysplit() --
// Initialise this KK object from Source restricted to the given point
// Indices: copies scalar parameters and library handles, allocates storage,
// copies the selected points' data/masks, and re-runs the precomputations.
// Used by TrySplits() to build sub-clustering objects.
void KK::ConstructFrom(const KK &Source, const vector<int> &Indices)
{
KK_split = NULL;
K2_container = NULL;
nDims = Source.nDims;
nDims2 = nDims*nDims;
nPoints = Indices.size();
penaltyK = Source.penaltyK;
penaltyKLogN = Source.penaltyKLogN;
priorPoint = Source.priorPoint;
nStartingClusters = Source.nStartingClusters;
NoisePoint = Source.NoisePoint;
FullStep = Source.FullStep;
nClustersAlive = Source.nClustersAlive;
numiterations = Source.numiterations;
//define cublas and cusolver handle
// Handles are shared (not duplicated) with the source object.
handle = Source.handle;
solver_handle = Source.solver_handle;
side = Source.side;
uplo = Source.uplo;
trans = Source.trans;
diag = Source.diag;
AllocateArrays(); // ===========================Set storage for all the arrays such as Data, FloatMasks, Weight, Mean, Cov, etc.
if (Debug)
{
Output("Entering ConstructFrom: \n");
}
// fill with a subset of points
for (int p = 0; p<nPoints; p++)
{
int psource = Indices[p];
//copy data and masks
for (int d = 0; d<nDims; d++)
Data[p*nDims + d] = Source.Data[psource*nDims + d];
if (Source.Masks.size()>0)
{
for (int d = 0; d<nDims; d++)
Masks[p*nDims + d] = Source.Masks[psource*nDims + d];
}
if (UseDistributional)
{
for (int d = 0; d<nDims; d++)
{
FloatMasks[p*nDims + d] = Source.FloatMasks[psource*nDims + d];
}
}
UnMaskDims[p] = Source.UnMaskDims[psource];
}
//Output(" Printing Source.NoiseVariance[2] = %f",Source.NoiseVariance[2]);
if (UseDistributional)
{
// Noise statistics are global per-dimension, so they copy unchanged.
NoiseMean.resize(nDims);
NoiseVariance.resize(nDims);
nMasked.resize(nDims);
for (int d = 0; d<nDims; d++)
{
NoiseMean[d] = Source.NoiseMean[d];
NoiseVariance[d] = Source.NoiseVariance[d];
nMasked[d] = Source.nMasked[d];
}
}
DoPrecomputations();
//Output(" Printing Source.NoiseMean[2] = %f",NoiseVariance[2]);
numiterations = 0;
}
// Rebuild this KK object as a full copy of Source by delegating to the
// subset version with the identity index mapping.
void KK::ConstructFrom(const KK &Source)
{
    vector<int> identity(Source.nPoints);
    for (size_t q = 0; q < identity.size(); ++q)
        identity[q] = (int)q;
    ConstructFrom(Source, identity);
}
// Subset copy constructor: builds a KK object containing only the points
// listed in Indices (used by TrySplits for per-cluster sub-models).
KK::KK(const KK &Source, const vector<int> &Indices)
{
ConstructFrom(Source, Indices);
init_type = 2;   // 2 = constructed as a subset copy
}
// If we don't specify an index subset, use everything.
//invoke in the trysplit step
// Full copy constructor: copies every point of Source (used by TrySplits
// for the whole-data comparison object K3).
KK::KK(const KK &Source)
{
ConstructFrom(Source);
init_type = 1;   // 1 = constructed as a full copy
}
// Destructor: releases the child KK objects owned by this instance.
KK::~KK()
{
    // delete of a null pointer is a no-op, so no guards are required.
    delete KK_split;
    KK_split = NULL;
    delete K2_container;
    K2_container = NULL;
    // The BLAS/solver handles are shared with child KK objects (see
    // ConstructFrom), so they are deliberately not destroyed here.
    //hipblasDestroy(handle);
    //hipsolverDnDestroy(solver_handle);
}
// Main loop
//int main(int argc, char **argv)
// Program entry point: constructs the main KK object from the input files,
// runs the clustering (from a start .clu file if provided, otherwise from
// MaskStarts clusters), saves the best assignment, and prints timing stats.
// Fix: the constructor timing was stored in an int and passed to a %f
// format specifier (undefined behaviour in varargs); it is now a float.
int main()
{
    float Score;
    float BestScore = HugeScore;
    int p;
    char fname[STRLEN];
    if (Log) {
        sprintf(fname, "%s.klg.%d", FileBase, (int)ElecNo);
        logfp = fopen_safe(fname, "w");
    }
    //SetupParams((int)argc, argv); // This function is defined in parameters.cpp
    //getchar();
    Output("Starting KlustaKwik. Version: %s\n", VERSION);
    //if (RamLimitGB == 0.0)
    //{
    // RamLimitGB = (1.0*total_physical_memory()) / (1024.0*1024.0*1024.0);
    // Output("Setting RAM limit to total physical memory, %.2f GB.\n", (double)RamLimitGB);
    //}
    //else if (RamLimitGB < 0.0)
    //{
    // RamLimitGB = 1e20;
    // Output("WARNING: You have chosen not to set a RAM limit, this may cause problems.\n");
    //}
    //clock_t Clock0 = clock();
    Clock0 = clock();
    clock_t etime = 0;
    clock_t mtime = 0;
#ifdef _OPENMP
    double start_time = omp_get_wtime();
#endif
    // The main KK object, loads the data and does some precomputations
    KK K1(FileBase, ElecNo, UseFeatures, PenaltyK, PenaltyKLogN, PriorPoint);
    // float (not int): this value is printed with %f below.
    float constructorTime = (clock() - Clock0) / (float)CLOCKS_PER_SEC;
    Output("Time taken for constructor:%f seconds.\n", constructorTime);
    if (UseDistributional && SaveSorted) //Bug fix (Classical KK would terminate here)
        K1.SaveSortedData();
    Output("\nFileBase : %s\n ----------------------------------------------------------------\n", FileBase);
    // Seed random number generator
    srand((unsigned int)RandomSeed);
    // open distance dump file if required
    if (DistDump) Distfp = fopen("DISTDUMP", "w");
    // start with provided file, if required
    if (*StartCluFile)
    {
        Output("\nStarting from cluster file %s\n", StartCluFile);
        float iterationtime = (float)clock();
        BestScore = K1.CEM(StartCluFile, 1, 1); //Main computation
        iterationtime = (clock() - iterationtime) / (float)CLOCKS_PER_SEC;
        Output("Time taken for this iteration:%f seconds.\n", iterationtime);
        Output(" %d->%d Clusters: Score %f\n\n", (int)K1.nStartingClusters, (int)K1.nClustersAlive, BestScore);
        for (p = 0; p<K1.nPoints; p++)
            K1.BestClass[p] = K1.Class[p];
        K1.SaveOutput();
    }
    else
    {
        // loop through numbers of clusters ...
        K1.nStartingClusters = MaskStarts;
        // do CEM iteration
        Output("\nStarting from %d clusters...\n", (int)K1.nStartingClusters);
        float iterationtime = (float)clock();
        Score = K1.Cluster(); //Main computation
        iterationtime = (clock() - iterationtime) / (float)CLOCKS_PER_SEC;
        Output("Time taken for this iteration:%f seconds.\n", iterationtime);
        Output(" %d->%d Clusters: Score %f, best is %f\n", (int)K1.nStartingClusters, (int)K1.nClustersAlive, Score, BestScore);
        if (Score < BestScore)
        {
            Output("THE BEST YET!\n"); // New best classification found
            BestScore = Score;
            // Pull the final assignment straight from the device copy.
            hipMemcpy(&K1.BestClass[0], K1.d_Class, K1.nPoints * sizeof(int), hipMemcpyDeviceToHost);
            //for (p = 0; p < K1.nPoints; p++)
            //K1.BestClass[p] = K1.Class[p];
            K1.SaveOutput();
        }
        Output("\n");
    }
    K1.SaveOutput();
    K1.FreeArray();
    hipDeviceReset();
#ifdef _OPENMP
    float tottime = omp_get_wtime() - start_time;
#else
    float tottime = (clock() - Clock0) / (float)CLOCKS_PER_SEC;
#endif
    // NOTE(review): etime/mtime are never accumulated in this file, so the
    // E/M per-iteration figures below report 0 — confirm intended.
    Output("E step: %d (time per iteration =%f ms)\n",
        (int)K1.numiterations,
        1e3*etime / (float)CLOCKS_PER_SEC / K1.numiterations);
    Output("M step: %d (time per iteration =%f ms)\n",
        (int)K1.numiterations,
        1e3*mtime / (float)CLOCKS_PER_SEC / K1.numiterations);
    Output("Main iterations: %d (time per iteration =%f ms)\n",
        (int)K1.numiterations,
        1e3*tottime / K1.numiterations);
    Output("Total iterations: %d (time per iteration =%f ms)\n",
        (int)global_numiterations,
        1e3*tottime / global_numiterations);
    Output("\nDef. Iteration metric 2:\nIteration_metric2 += (float)(nDims*nDims)*(float)(nPoints)\n");
    Output("Iterations metric 2: %f (time per metric unit =%fns)\n",
        iteration_metric2,
        1e9*tottime / iteration_metric2);
    Output("\nDef. Iteration metric 3:\nIteration_metric3 += (float)(nDims*nDims)*(float)(nDims*nPoints)\n");
    Output("Iterations metric 3: %f (time per metric unit=%fps)\n",
        iteration_metric3,
        1e12*tottime / iteration_metric3);
    Output("\nThat took %f seconds.\n", tottime);
    if (DistDump) fclose(Distfp);
    //Output("maxsize = %d\n", maxsize);
    //getchar();
    return 0;
}
| f612826ab21b69ea7fa5d1879504c8c18d9e7e5a.cu | // MaskedKlustaKwik2.C
//
// Fast clustering using the CEM algorithm with Masks.
# pragma warning (disable:4819)
#ifndef VERSION
#define VERSION "0.3.0-nogit"
#endif
// Disable some Visual Studio warnings
#define _CRT_SECURE_NO_WARNINGS
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <stdio.h>
#include <iostream>
#include "klustakwik.h"
#include "util.h"
#include<stdlib.h>
#define _USE_MATH_DEFINES
#include<math.h>
#define BLOCKDIM 128
#ifdef _OPENMP
#include<omp.h>
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA runtime status code through Output(); used via the
// gpuErrchk macro, which supplies the call site's file and line.
// code  : status returned by a CUDA API call
// file  : source file of the call site
// line  : source line of the call site
// abort : when true, terminate the process with the error code
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = false) {
    if (code == cudaSuccess)
        return; // success: nothing to report
    Output("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// GLOBAL VARIABLES
FILE *Distfp;                        // distance dump file, opened when DistDump is set
int global_numiterations = 0;        // total CEM iterations accumulated across all runs
float iteration_metric2 = (float)0;  // accumulated nDims^2 * nPoints work metric (see report in main)
float iteration_metric3 = (float)0;  // accumulated nDims^2 * (nDims*nPoints) work metric
clock_t Clock0;                      // program start time, used for total-time reporting
float timesofar;                     // elapsed-time scratch (usage not visible in this chunk)
//===========================================================================================
// Resize x to exactly newsize elements and zero ALL of them (not just any
// newly appended tail). Replaces the original three-branch resize/fill
// sequence, every path of which ended with size == newsize and all
// elements equal to (T)0 — vector::assign does both in one call.
template<class T>
inline void resize_and_fill_with_zeros(vector<T> &x, int newsize)
{
    x.assign((unsigned int)newsize, (T)0);
}
//===========================================init d_ones========================================================//
// Fill the first 2000 entries of d_ones with 1.0 (used as the "ones"
// vector for cuBLAS-style reductions).
// NOTE(review): the 2000 bound is hard-coded to match the cudaMalloc of
// d_ones in KK::AllocateArrays; the nDims parameter is currently unused —
// confirm whether it was meant to bound the fill instead.
__global__ void init_dones(int nDims, float *d_ones) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < 2000) d_ones[tid] = 1.0;
}
// Sets storage for KK class. Needs to have nDims and nPoints defined.
// Resizes every host-side array to match nDims / nPoints /
// MaxPossibleClusters (zero-filling each one) and prepares the small
// device-side vector of ones used by reductions.
void KK::AllocateArrays() {
    nDims2 = nDims*nDims;
    NoisePoint = 1; // Ensures that the mixture weight for the noise cluster never gets to zero
    // Set sizes for arrays
    resize_and_fill_with_zeros(Data, nPoints * nDims);
    resize_and_fill_with_zeros(Masks, nPoints * nDims);
    resize_and_fill_with_zeros(FloatMasks, nPoints * nDims);
    resize_and_fill_with_zeros(UnMaskDims, nPoints); //SNK Number of unmasked dimensions for each data point when using float masks $\sum m_i$
    resize_and_fill_with_zeros(Weight, MaxPossibleClusters);
    resize_and_fill_with_zeros(Mean, MaxPossibleClusters*nDims);
    resize_and_fill_with_zeros(LogP, MaxPossibleClusters*nPoints);
    resize_and_fill_with_zeros(Class, nPoints);
    resize_and_fill_with_zeros(OldClass, nPoints);
    resize_and_fill_with_zeros(Class2, nPoints);
    resize_and_fill_with_zeros(BestClass, nPoints);
    resize_and_fill_with_zeros(ClassAlive, MaxPossibleClusters);
    resize_and_fill_with_zeros(AliveIndex, MaxPossibleClusters);
    resize_and_fill_with_zeros(ClassPenalty, MaxPossibleClusters);
    resize_and_fill_with_zeros(nClassMembers, MaxPossibleClusters);
    resize_and_fill_with_zeros(CorrectionTerm, nPoints * nDims);
    resize_and_fill_with_zeros(ClusterMask, MaxPossibleClusters*nDims);
    resize_and_fill_with_zeros(Offset, MaxPossibleClusters);
    //==============================GPU Allocate==============================
    // Device buffers below are allocated elsewhere; the commented block is
    // kept as a record of the intended sizes of each device array.
    //int sizeI = sizeof(int);
    //int sizeF = sizeof(float);
    //gpuErrchk(cudaMalloc((void **)&d_Class, nPoints*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_Data, nPoints*nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_Masks, nPoints*nDims*sizeof(int)));
    //gpuErrchk(cudaMalloc((void **)&d_nClassMembers, MaxPossibleClusters*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_ClassAlive, MaxPossibleClusters*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_AliveIndex, MaxPossibleClusters*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_NoiseMean, nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_NoiseVariance, nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_CorrectionTerm, nPoints*nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_FloatMasks, nPoints*nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_UnMaskDims, nPoints*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_ClusterMask, MaxPossibleClusters*nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_Mean, MaxPossibleClusters*nDims*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_Weight, MaxPossibleClusters*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_LogP, MaxPossibleClusters*nPoints*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_OldClass, nPoints*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_ClassPenalty, MaxPossibleClusters*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_Class2, nPoints*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_BestClass, nPoints*sizeI));
    ////temp variables
    //gpuErrchk(cudaMalloc((void **)&d_ClassAliveTemp, MaxPossibleClusters*sizeI));
    //gpuErrchk(cudaMalloc((void **)&d_DeletionLoss, MaxPossibleClusters*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_tempSubtraction, MaxPossibleClusters*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_tempLogP, nPoints*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_tempOldClass, nPoints*sizeF));
    ////MEstep
    //gpuErrchk(cudaMalloc((void **)&d_unmaskedSolver, nPoints*sizeF));
    //gpuErrchk(cudaMalloc((void **)&d_AllVector2Mean, nPoints*nDims * sizeof(float)));
    //gpuErrchk(cudaMalloc((void **)&d_Current, MaxPossibleClusters * nDims * sizeof(int)));
    //gpuErrchk(cudaMalloc((void **)&d_PointsInThisClass, nPoints * sizeof(int)));
    //gpuErrchk(cudaMalloc((void **)&d_MarkClass, nPoints * sizeof(int)));
    //gpuErrchk(cudaMalloc((void **)&d_Offset, MaxPossibleClusters * sizeof(int)));
    ////for loop E step
    //gpuErrchk(cudaMalloc((void **)&d_pIndex, nPoints * sizeof(int)));
    //gpuErrchk(cudaMalloc((void **)&d_points2Mean, nPoints*nDims * sizeof(float)));
    //gpuErrchk(cudaMalloc((void **)&d_InvCovDiag, nDims * sizeof(float)));
    //gpuErrchk(cudaMalloc((void **)&d_temp, nDims * sizeof(float)));
    //gpuErrchk(cudaMalloc((void **)&d_updatePointsList, nPoints * sizeof(int)));
    //
    // NOTE: the 2000-element size must stay in sync with the bound
    // hard-coded inside init_dones.
    gpuErrchk(cudaMalloc((void **)&d_ones, 2000 * sizeof(float)));
    init_dones << <(2000 + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >> > (nDims, d_ones);
    gpuErrchk(cudaGetLastError()); // kernel launches don't return errors; check explicitly
}
// Penalty for standard CEM.
// Penalty(n) returns the complexity penalty for n live clusters, bearing
// in mind that cluster 0 (noise) has no free parameters except its weight,
// so a single cluster incurs no penalty at all.
float KK::Penalty(int n)
{
    if (n == 1)
        return 0;
    // free parameters per non-noise cluster: covariance + mean + weight
    const int nParams = (nDims*(nDims + 1) / 2 + nDims + 1)*(n - 1);
    // AIC units (Spurious factor of 2 removed from AIC units on 09.07.13)
    const float aicTerm = penaltyK*(float)(nParams);
    // BIC units
    const float bicTerm = penaltyKLogN*((float)nParams*(float)log((float)nPoints) / 2);
    return aicTerm + bicTerm;
}
//======================================ComputeClassPenalties================================================
// Histogram kernel: one thread per point, bumping the member count of the
// class that point currently belongs to.
__global__ void c_nnClassMembers(int nPoints, int *d_Class, int *d_nClassMembers) {
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    atomicAdd(&d_nClassMembers[d_Class[p]], 1);
}
// Zero the per-cluster penalty accumulator (one thread per cluster slot).
__global__ void initClassPenalty(int MaxPossibleClusters, float *d_ClassPenalty) {
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if (c >= MaxPossibleClusters)
        return;
    d_ClassPenalty[c] = 0.0f;
}
// Accumulate, per cluster, the parameter count implied by each member
// point's unmasked-dimension count n: n(n+1)/2 covariance terms + n means
// + 1 weight.
__global__ void updateClassPenalty(int nPoints, int *d_Class, float *d_UnMaskDims, float *d_ClassPenalty) {
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    float n = d_UnMaskDims[p];
    float nParams = n*(n + 1) / 2 + n + 1;
    atomicAdd(&d_ClassPenalty[d_Class[p]], nParams);
}
// Turn the accumulated parameter totals into final per-cluster penalties:
// average parameter count per member point, then AIC + BIC weighting
// (mirrors the CPU path in KK::ComputeClassPenalties).
__global__ void computeClassPenalty(int MaxPossibleClusters, int nPoints, float penaltyK, float penaltyKLogN,
    int *d_nClassMembers, float *d_ClassPenalty) {
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if (c >= MaxPossibleClusters)
        return;
    if (d_nClassMembers[c] > 0)
        d_ClassPenalty[c] /= (float)d_nClassMembers[c];
    float nParams = d_ClassPenalty[c];
    d_ClassPenalty[c] = penaltyK*(nParams * 2)
        + penaltyKLogN*(nParams*(float)log((float)nPoints) / 2);
}
// Penalties for Masked CEM.
// Recomputes ClassPenalty[c] for every cluster: the average (over member
// points) of the parameter count implied by each point's unmasked-dimension
// count, weighted into AIC + BIC terms. CPU path writes host ClassPenalty;
// GPU path runs the equivalent three kernels on d_ClassPenalty.
void KK::ComputeClassPenalties()
{
    if (useCpu) {
        // Output("ComputeClassPenalties: Correct if UseDistributional only");
        // reset accumulators
        for (int c = 0; c < MaxPossibleClusters; c++)
            ClassPenalty[c] = (float)0;
        // compute sum of nParams for each
        vector<int> NumberInClass(MaxPossibleClusters);
        for (int p = 0; p < nPoints; p++)
        {
            int c = Class[p];
            NumberInClass[c]++;
            //	int n = UnmaskedInd[p+1]-UnmaskedInd[p]; // num unmasked dimensions
            float n = UnMaskDims[p];
            // covariance (n(n+1)/2) + mean (n) + weight (1)
            float nParams = n*(n + 1) / 2 + n + 1;
            ClassPenalty[c] += nParams;
        }
        // compute mean nParams for each cluster
        for (int c = 0; c < MaxPossibleClusters; c++)
            if (NumberInClass[c] > 0)
                ClassPenalty[c] /= (float)NumberInClass[c];
        // compute penalty for each cluster (AIC + BIC terms)
        for (int c = 0; c < MaxPossibleClusters; c++)
        {
            float nParams = ClassPenalty[c];
            ClassPenalty[c] = penaltyK*(float)(nParams * 2)
                + penaltyKLogN*((float)nParams*(float)log((float)nPoints) / 2);
        }
    }
    //=======================================GPU code======================================
    else {
        // same three stages as the CPU path: zero, accumulate per point,
        // then normalize and weight per cluster
        initClassPenalty << <(MaxPossibleClusters+BLOCKDIM - 1)/ BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, d_ClassPenalty);
        updateClassPenalty << <(nPoints+ BLOCKDIM - 1)/ BLOCKDIM, BLOCKDIM >> > (nPoints,d_Class,d_UnMaskDims,d_ClassPenalty);
        computeClassPenalty << <(MaxPossibleClusters + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, nPoints, penaltyK, penaltyKLogN,
            d_nClassMembers,d_ClassPenalty);
        //Output("d_ClassPenalty.size: %d\n", d_ClassPenalty.size());
        //for (int i = 0; i < d_ClassPenalty.size(); i++) std::cout << d_ClassPenalty[i] << " ";
        //Output("\n");
    }
}
//===========================================CStep==================================================
// One thread per point: snapshot the current assignment into d_OldClass,
// then scan the live clusters (d_AliveIndex[ccstart..nClustersAlive)) for
// the smallest and second-smallest d_LogP score, writing them to d_Class
// and d_Class2 respectively. When allow_assign_to_noise is false the scan
// starts at index 1, excluding the noise cluster in AliveIndex[0].
// Mirrors the CPU path in KK::CStep.
__global__ void d_cstep(int MaxPossibleClusters, int nPoints, bool allow_assign_to_noise, int nClustersAlive, float HugeScore,
    int *d_OldClass, int *d_Class, int *d_Class2, int *d_AliveIndex, float *d_LogP) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < nPoints)
    {
        d_OldClass[tid] = d_Class[tid]; // remember previous assignment
        float BestScore = HugeScore;
        float SecondScore = HugeScore;
        float ThisScore;
        int TopClass = 0;
        int SecondClass = 0;
        int ccstart = 0, c;
        if (!allow_assign_to_noise)
            ccstart = 1;
        for (int cc = ccstart; cc<nClustersAlive; cc++)
        {
            c = d_AliveIndex[cc];
            ThisScore = d_LogP[tid*MaxPossibleClusters + c];
            if (ThisScore < BestScore)
            {
                // new best: previous best becomes the runner-up
                SecondClass = TopClass;
                TopClass = c;
                SecondScore = BestScore;
                BestScore = ThisScore;
            }
            else if (ThisScore < SecondScore)
            {
                SecondClass = c;
                SecondScore = ThisScore;
            }
        }
        d_Class[tid] = TopClass;
        d_Class2[tid] = SecondClass;
    }
}
// Choose best class for each point (and second best) out of those living.
// For every point: records the previous assignment in OldClass, then sets
// Class to the live cluster with the lowest LogP score and Class2 to the
// runner-up. allow_assign_to_noise == false excludes AliveIndex[0] (the
// noise cluster) from the candidate set. The GPU branch runs the
// equivalent d_cstep kernel on the device arrays.
void KK::CStep(bool allow_assign_to_noise)
{
    if (useCpu) {
        int p, c, cc, TopClass, SecondClass;
        int ccstart = 0;
        if (!allow_assign_to_noise)
            ccstart = 1;
        float ThisScore, BestScore, SecondScore;
        for (p = 0; p < nPoints; p++)
        {
            OldClass[p] = Class[p]; // remember previous assignment
            BestScore = HugeScore;
            SecondScore = HugeScore;
            TopClass = SecondClass = 0;
            for (cc = ccstart; cc < nClustersAlive; cc++)
            {
                c = AliveIndex[cc];
                ThisScore = LogP[p*MaxPossibleClusters + c];
                if (ThisScore < BestScore)
                {
                    // new best: previous best becomes the runner-up
                    SecondClass = TopClass;
                    TopClass = c;
                    SecondScore = BestScore;
                    BestScore = ThisScore;
                }
                else if (ThisScore < SecondScore)
                {
                    SecondClass = c;
                    SecondScore = ThisScore;
                }
            }
            Class[p] = TopClass;
            Class2[p] = SecondClass;
        }
    }
    //=====================================GPUcode=======================================
    else {
        d_cstep << <(nPoints + BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, nPoints, allow_assign_to_noise, nClustersAlive, HugeScore,
            d_OldClass,d_Class,d_Class2,d_AliveIndex,d_LogP);
        /*
        Output("d_OldClass.size: %d\n", d_OldClass.size());
        for (int i = 0; i < d_OldClass.size(); i++) std::cout << d_OldClass[i] << " ";
        Output("\n");
        Output("d_Class.size: %d\n", d_Class.size());
        for (int i = 0; i < d_Class.size(); i++) std::cout << d_Class[i] << " ";
        Output("\n");
        Output("d_Class2.size: %d\n", d_Class2.size());
        for (int i = 0; i < d_Class2.size(); i++) std::cout << d_Class2[i] << " ";
        Output("\n");
        */
    }
}
//======================================ConsiderDeletion============================================
// Initialise the per-cluster deletion-loss accumulator: live clusters
// start at 0, dead clusters at HugeScore so they are never selected for
// deletion. Slot 0 (the noise cluster) is skipped, matching the CPU loop
// that starts at c = 1; subtractionLoss later pins entry 0 to HugeScore,
// so its stale value never wins the argmin.
// NOTE(review): computeDeletionLoss still atomicAdds into
// d_DeletionLoss[0] from an uninitialized base — harmless since the value
// is unused, but worth confirming.
__global__ void initDeletionLoss(int MaxPossibleClusters, float HugeScore, int *d_ClassAlive,float *d_DeletionLoss) {
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    if (tidx > 0 && tidx < MaxPossibleClusters) {
        if (d_ClassAlive[tidx]) d_DeletionLoss[tidx] = 0;
        else d_DeletionLoss[tidx] = HugeScore;
    }
}
// For each point, add (second-best log score - best log score) to its
// current cluster's deletion loss: the cost of reassigning that point if
// the cluster were removed.
__global__ void computeDeletionLoss(int nPoints, int MaxPossibleClusters, int *d_Class, int *d_Class2, float *d_LogP, float *d_DeletionLoss) {
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    int best = d_Class[p];
    int second = d_Class2[p];
    float delta = d_LogP[p*MaxPossibleClusters + second] - d_LogP[p*MaxPossibleClusters + best];
    atomicAdd(&d_DeletionLoss[best], delta);
}
// Score each cluster for deletion: loss minus the penalty that deletion
// would save. Index 0 (noise) is pinned to HugeScore so it can never win
// the subsequent argmin.
__global__ void subtractionLoss(int MaxPossibleClusters, float HugeScore, float *d_ClassPenalty, float *d_DeletionLoss, float *d_tempSubtraction) {
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if (c >= MaxPossibleClusters)
        return;
    d_tempSubtraction[c] = (c == 0) ? HugeScore
                                    : d_DeletionLoss[c] - d_ClassPenalty[c];
}
// Move every point of the deleted cluster to its second-best cluster;
// thread 0 additionally marks the deleted cluster as dead. The two writes
// touch disjoint arrays, so their order within a thread is immaterial.
__global__ void reallocatePoints(int nPoints, int CandidateClass, int* d_ClassAlive, int *d_Class, int *d_Class2){
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    if (p == 0)
        d_ClassAlive[CandidateClass] = 0;
    if (d_Class[p] == CandidateClass)
        d_Class[p] = d_Class2[p];
}
// (index, value) pair used to return both the minimum and its position
// from the device-side argmin reduction (findMin / findResult).
struct KeyValue {
    int id;   // index of the element within the input array
    float v;  // the element's value
};
// Single-block argmin: finds the minimum of d_s[0..n-1] and its index,
// writing both to d_result[0]. Launch as <<<1, B, B*sizeof(KeyValue)>>>
// for any power-of-two B (findResult uses B = 512); the strided load loop
// handles any n >= 1, not just n <= 2*B.
//
// Rewritten from the original unrolled reduction, which (a) read
// mixdata[tid + blockDim.x], past the end of the B-entry shared buffer,
// (b) only ever loaded the pair (tid, tid+B), silently skipping d_s[tid]
// whenever tid + B >= n — for n <= B no input was loaded at all — and
// (c) performed the tid < 32 tail on non-volatile shared memory with no
// synchronization, a race under independent thread scheduling.
__global__ void findMin(int n, float *d_s, KeyValue *d_result) {
    extern __shared__ KeyValue mixdata[];
    int tid = threadIdx.x;
    // Each thread takes the min over a strided slice so every element of
    // d_s[0..n-1] is examined exactly once.
    KeyValue best;
    best.id = tid;
    best.v = 100000000.0; // sentinel: larger than any real deletion score
    for (int i = tid; i < n; i += blockDim.x) {
        if (d_s[i] < best.v) {
            best.v = d_s[i];
            best.id = i;
        }
    }
    mixdata[tid] = best;
    __syncthreads();
    // Tree reduction with a barrier at every step (blockDim.x must be a
    // power of two).
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s && mixdata[tid + s].v < mixdata[tid].v)
            mixdata[tid] = mixdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        d_result[0] = mixdata[0];
}
// Host wrapper around the findMin kernel: returns the (index, value) of
// the minimum of d_s[0..n-1]. Allocates a one-element device scratch
// result for the duration of the call; all CUDA calls are now checked
// (the original ignored every return code).
KeyValue findResult(int n, float *d_s) {
    KeyValue *d_result;
    gpuErrchk(cudaMalloc((void **)&d_result, sizeof(KeyValue)));
    findMin<<<1,512, 512 * sizeof(KeyValue) >>>(n, d_s, d_result);
    gpuErrchk(cudaGetLastError()); // catch launch-configuration errors
    KeyValue h_result;
    // blocking copy also synchronizes with the kernel above
    gpuErrchk(cudaMemcpy(&h_result, d_result, sizeof(KeyValue), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(d_result));
    return h_result;
}
// Sometimes deleting a cluster will improve the score, when you take into
// account the BIC. This function sees if this is the case. It will not
// delete more than one cluster at a time.
// Both paths: accumulate per-cluster deletion loss (sum over member points
// of second-best minus best LogP), pick the cluster minimizing
// loss - penalty, and if that is negative, kill it and reassign its points
// to their second-best clusters. Cluster 0 (noise) is never a candidate.
void KK::ConsiderDeletion()
{
    if(useCpu){
        int c, p, CandidateClass = 0;
        float Loss, DeltaPen;
        vector<float> DeletionLoss(MaxPossibleClusters); // the increase in log P by deleting the cluster
        if (Debug)
            Output(" Entering ConsiderDeletion: ");
        // live clusters start at 0; dead ones get HugeScore so they can't win
        for (c = 1; c < MaxPossibleClusters; c++){
            if (ClassAlive[c]) DeletionLoss[c] = 0;
            else DeletionLoss[c] = HugeScore; // don't delete classes that are already there
        }
        // compute losses by deleting clusters
        vector<int> NumberInClass(MaxPossibleClusters);
        for (p = 0; p < nPoints; p++){
            DeletionLoss[Class[p]] += LogP[p*MaxPossibleClusters + Class2[p]] - LogP[p*MaxPossibleClusters + Class[p]];
            int ccc = Class[p];
            NumberInClass[ccc]++; // For computing number of points in each class
        }
        // find class with smallest increase in total score
        Loss = HugeScore;
        if (UseDistributional) //For UseDistribution, we use the ClusterPenalty
        {
            for (c = 1; c < MaxPossibleClusters; c++){
                if ((DeletionLoss[c] - ClassPenalty[c]) < Loss){
                    Loss = DeletionLoss[c] - ClassPenalty[c];
                    CandidateClass = c;
                }
            }
        }// or in the case of fixed penalty find class with least to lose
        // what is the change in penalty?
        if (UseDistributional) //For the distributional algorithm we need to use the ClusterPenalty
            DeltaPen = ClassPenalty[CandidateClass];
        //Output("cand Class %d would lose %f gain is %f\n", (int)CandidateClass, Loss, DeltaPen);
        // is it worth it?
        //06/12/12 fixing bug introduced which considered DeltaPen twice!
        if (UseDistributional) //For the distributional algorithm we need to use the ClusterPenalty
        {
            // negative Loss means the penalty saved exceeds the score lost
            if (Loss < 0){
                Output("Deleting Class %d (%d points): Lose %f but Gain %f\n", (int)CandidateClass, (int)NumberInClass[CandidateClass], DeletionLoss[CandidateClass], DeltaPen);
                // set it to dead
                ClassAlive[CandidateClass] = 0;
                // re-allocate all of its points
                for (p = 0; p < nPoints; p++) if (Class[p] == CandidateClass) Class[p] = Class2[p];
                // recompute class penalties
                ComputeClassPenalties();
            }
        }
        Reindex();
    }
    //=============================================GPU code=======================================
    else {
        // same pipeline on the device: init losses, accumulate per point,
        // form loss-penalty, then single-block argmin via findResult
        initDeletionLoss << <(MaxPossibleClusters + BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (MaxPossibleClusters, HugeScore, d_ClassAlive,d_DeletionLoss);
        computeDeletionLoss << <(nPoints + BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (nPoints, MaxPossibleClusters,d_Class,d_Class2,d_LogP,d_DeletionLoss);
        //compute minloss and index
        subtractionLoss << <MaxPossibleClusters/BLOCKDIM + 1, BLOCKDIM >> > (MaxPossibleClusters, HugeScore,
            d_ClassPenalty, d_DeletionLoss, d_tempSubtraction);
        KeyValue result = findResult(MaxPossibleClusters, d_tempSubtraction);
        //float minLoss = result.v;int CandidateClass = result.id;
        if (result.v < 0) {
            //Output("Deleting Class %d (%d points): Lose %f but Gain %f\n", (int)CandidateClass, (int)d_NumberInClass[CandidateClass], d_DeletionLoss[CandidateClass], d_ClassPenalty[CandidateClass]);
            reallocatePoints << <nPoints/BLOCKDIM + 1, BLOCKDIM >> > (nPoints, result.id,
                d_ClassAlive, d_Class, d_Class2);
            ComputeClassPenalties();
        }
        Reindex();
        // keep the host-side mirrors in sync for callers that read them
        cudaMemcpy(&AliveIndex[0], d_AliveIndex, MaxPossibleClusters*sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(&nClassMembers[0], d_nClassMembers, MaxPossibleClusters * sizeof(int), cudaMemcpyDeviceToHost);
        /*
        Output("this is in the ConsiderDeletion:=======================\n");
        Output("d_AliveIndex: %d\n", d_AliveIndex.size());
        for (int i = 0;i < d_AliveIndex.size();i++) cout << d_AliveIndex[i] << " ";
        Output("\n");
        Output("d_ClassAlive: %d\n", d_ClassAlive.size());
        for (int i = 0;i < d_ClassAlive.size();i++) cout << d_ClassAlive[i] << " ";
        Output("\n");
        */
    }
}
// LoadClu(CluFile) - load an initial cluster assignment from a .clu file:
// first value is the number of clusters, followed by one 1-based class
// label per point (stored 0-based in Class[]). Marks the first
// nStartingClusters classes alive.
void KK::LoadClu(char *CluFile)
{
    FILE *fp;
    int p, c;
    int val; // read in from %d
    int status;
    fp = fopen_safe(CluFile, "r");
    status = fscanf(fp, "%d", &nStartingClusters);
    if (status != 1) Error("Error reading cluster count from cluster file"); // was silently ignored
    nClustersAlive = nStartingClusters;// -1;
    for (c = 0; c<MaxPossibleClusters; c++) ClassAlive[c] = (c<nStartingClusters);
    for (p = 0; p<nPoints; p++)
    {
        status = fscanf(fp, "%d", &val);
        if (status == EOF) Error("Error reading cluster file");
        Class[p] = val - 1; // file labels are 1-based
    }
    fclose(fp); // the original leaked this handle
}
// for each cluster, try to split it in two. if that improves the score, do it.
// returns 1 if split was successful
// For every live non-noise cluster: fit its points alone with 1 and then
// 2 clusters (sub-KK object K2). If the 2-cluster fit scores better,
// either split unconditionally (AlwaysSplitBimodal) or re-score the whole
// data set with the tentative split applied (scratch object K3) and keep
// it only if the global score improves. KK_split / K2_container are
// cached across calls to avoid reallocation.
int KK::TrySplits()
{
    int c, cc, c2, p, p2, DidSplit = 0;
    float Score, NewScore, UnsplitScore, SplitScore;
    int UnusedCluster;
    //KK K2; // second KK structure for sub-clustering
    //KK K3; // third one for comparison
    if (nClustersAlive >= MaxPossibleClusters - 1)
    {
        Output("Won't try splitting - already at maximum number of clusters\n");
        return 0;
    }
    // set up K3 and remember to add the masks
    //KK K3(*this);
    if (!AlwaysSplitBimodal)
    {
        if (KK_split == NULL)
        {
            KK_split = new KK(*this);
        }
        else
        {
            // We have to clear these to bypass the debugging checks
            // in precomputations.cpp
            KK_split->Unmasked.clear();
            KK_split->UnmaskedInd.clear();
            KK_split->SortedMaskChange.clear();
            KK_split->SortedIndices.clear();
            // now we treat it as empty
            KK_split->ConstructFrom(*this);
        }
    }
    //KK &K3 = *KK_split;
    // K3 aliases the cached scratch object for the remainder of this function
#define K3 (*KK_split)
    Output("Compute initial score before splitting: ");
    Score = ComputeScore();
    // loop the clusters, trying to split
    for (cc = 1; cc<nClustersAlive; cc++)
    {
        c = AliveIndex[cc];
        // set up K2 structure to contain points of this cluster only
        vector<int> SubsetIndices;
        for (p = 0; p<nPoints; p++)
            if (Class[p] == c)
                SubsetIndices.push_back(p);
        if (SubsetIndices.size() == 0)
            continue;
        if (K2_container)
        {
            // We have to clear these to bypass the debugging checks
            // in precomputations.cpp
            K2_container->Unmasked.clear();
            K2_container->UnmaskedInd.clear();
            K2_container->SortedMaskChange.clear();
            K2_container->SortedIndices.clear();
            //K2_container->AllVector2Mean.clear();
            // now we treat it as empty
            K2_container->ConstructFrom(*this, SubsetIndices);
        }
        else
        {
            K2_container = new KK(*this, SubsetIndices);
        }
        //KK K2(*this, SubsetIndices);
        KK &K2 = *K2_container;
        // find an unused cluster to receive the second half of a split
        UnusedCluster = -1;
        for (c2 = 1; c2<MaxPossibleClusters; c2++){
            if (!ClassAlive[c2]){
                UnusedCluster = c2;
                break;
            }
        }
        if (UnusedCluster == -1)
        {
            Output("No free clusters, abandoning split");
            return DidSplit;
        }
        // do it: compare the 1-cluster and 2-cluster fits of this subset
        if (Verbose >= 1) Output("\n Trying to split cluster %d (%d points) \n", (int)c, (int)K2.nPoints);
        K2.nStartingClusters = 2; // (2 = 1 clusters + 1 unused noise cluster)
        UnsplitScore = K2.CEM(NULL, 0, 1, false);
        K2.nStartingClusters = 3; // (3 = 2 clusters + 1 unused noise cluster)
        SplitScore = K2.CEM(NULL, 0, 1, false);
        // Fix by Michaël Zugaro: replace next line with following two lines
        // if(SplitScore<UnsplitScore) {
        if (K2.nClustersAlive<2) Output("\n Split failed - leaving alone\n");
        if ((SplitScore<UnsplitScore) && (K2.nClustersAlive >= 2)) {
            if (AlwaysSplitBimodal)
            {
                // accept the split without re-scoring the full data set
                DidSplit = 1;
                Output("\n We are always splitting bimodal clusters so it's getting split into cluster %d.\n", (int)UnusedCluster);
                p2 = 0; // p2 walks K2's (subset) point index in step with p
                for (p = 0; p < nPoints; p++)
                {
                    if (Class[p] == c)
                    {
                        if (K2.Class[p2] == 1) Class[p] = c;
                        else if (K2.Class[p2] == 2) Class[p] = UnusedCluster;
                        else Error("split should only produce 2 clusters\n");
                        p2++;
                    }
                    ClassAlive[Class[p]] = 1;
                }
            }
            else
            {
                // will splitting improve the score in the whole data set?
                // assign clusters to K3
                for (c2 = 0; c2 < MaxPossibleClusters; c2++) K3.ClassAlive[c2] = 0;
                //	Output("%d Points in class %d in KKobject K3 ", (int)c2, (int)K3.nClassMembers[c2]);
                p2 = 0; // p2 walks K2's (subset) point index in step with p
                for (p = 0; p < nPoints; p++)
                {
                    if (Class[p] == c)
                    {
                        if (K2.Class[p2] == 1) K3.Class[p] = c;
                        else if (K2.Class[p2] == 2) K3.Class[p] = UnusedCluster;
                        else Error("split should only produce 2 clusters\n");
                        p2++;
                    }
                    else K3.Class[p] = Class[p];
                    K3.ClassAlive[K3.Class[p]] = 1;
                }
                K3.Reindex();
                // compute scores
                K3.MEstep();
                //K3.MStep();
                //K3.EStep();
                //Output("About to compute K3 class penalties");
                if (UseDistributional) K3.ComputeClassPenalties(); //SNK Fixed bug: Need to compute the cluster penalty properly, cluster penalty is only used in UseDistributional mode
                NewScore = K3.ComputeScore();
                Output("\nSplitting cluster %d changes total score from %f to %f\n", (int)c, Score, NewScore);
                if (NewScore < Score)
                {
                    DidSplit = 1;
                    Output("\n So it's getting split into cluster %d.\n", (int)UnusedCluster);
                    // so put clusters from K3 back into main KK struct (K1)
                    for (c2 = 0; c2 < MaxPossibleClusters; c2++) ClassAlive[c2] = K3.ClassAlive[c2];
                    for (p = 0; p < nPoints; p++) Class[p] = K3.Class[p];
                }
                else
                {
                    Output("\n So it's not getting split.\n");
                }
            }
        }
    }
    return DidSplit;
#undef K3
}
//=========================================ComputeScore==============================================
// Gather each point's log score under its currently assigned class into a
// flat array, so ComputeScore can reduce it with a single cublasSasum.
__global__ void copyLogP(int nPoints, int MaxPossibleClusters, int *d_Class, float *d_LogP, float *d_tempLogP) {
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    d_tempLogP[p] = d_LogP[p*MaxPossibleClusters + d_Class[p]];
}
// ComputeScore() - computes total score. Requires M, E, and C steps to have been run
// Total score = sum over points of LogP[point, assigned class] plus the
// complexity penalty (per-cluster ClassPenalty sum when UseDistributional,
// otherwise Penalty(nClustersAlive)).
float KK::ComputeScore()
{
    if (useCpu) {
        int p;
        // int debugadd;
        float penalty = (float)0;
        if (UseDistributional)  // For distributional algorithm we require the cluster penalty
            for (int c = 0; c < MaxPossibleClusters; c++)
                penalty += ClassPenalty[c];
        else
            penalty = Penalty(nClustersAlive);
        float Score = penalty;
        for (p = 0; p < nPoints; p++)
        {    //debugadd = LogP[p*MaxPossibleClusters + Class[p]];
            Score += LogP[p*MaxPossibleClusters + Class[p]];
            // Output("point %d: cumulative score %f adding%f\n", (int)p, Score, debugadd);
        }
        //Error("Score: %f Penalty: %f\n", Score, penalty);
        Output(" Score: Raw %f + Penalty %f = %f\n", Score - penalty, penalty, Score);
        if (Debug) {
            int c, cc;
            float tScore;
            // per-cluster breakdown of the raw score
            for (cc = 0; cc < nClustersAlive; cc++) {
                c = AliveIndex[cc];
                tScore = 0;
                for (p = 0; p < nPoints; p++) if (Class[p] == c) tScore += LogP[p*MaxPossibleClusters + Class[p]];
                Output("class %d has subscore %f\n", c, tScore);
            }
        }
        return Score;
    }
    //====================================GPU code=========================
    else {
        float penalty;// = reduceFlt<128>(d_ClassPenalty, MaxPossibleClusters);
        cublasSasum(handle, MaxPossibleClusters, d_ClassPenalty, 1, &penalty);
        // gather LogP[p, Class[p]] into d_tempLogP, then reduce
        copyLogP << <(nPoints+BLOCKDIM - 1)/BLOCKDIM, BLOCKDIM >> > (nPoints, MaxPossibleClusters, d_Class,
            d_LogP,d_tempLogP);
        float Score;
        cublasSasum(handle, nPoints, d_tempLogP, 1, &Score);
        // NOTE(review): cublasSasum sums |x|, so this negation only matches
        // the CPU sum if all device LogP entries share one sign — confirm.
        Score = -Score;
        //float Score = reduceFlt<128>(d_tempLogP, nPoints);
        Output(" Score: Raw %f + Penalty %f = %f\n", Score, penalty, Score + penalty);
        return Score + penalty;
    }
}
// Initialise starting conditions by selecting unique masks at random.
// Picks (nStartingClusters - 1) distinct masks at random and makes each
// the seed of one starting cluster; every remaining point is assigned to
// the seed whose mask is closest in Hamming distance. Writes the result
// to Class[]/ClassAlive[], mirrors both to the device, and dumps the
// initial assignment to a .initialclusters clu file.
void KK::StartingConditionsFromMasks()
{
    int nClusters2start = 0; //SNK To replace nStartingClusters within this variable only
    //if (Debug)
    //    Output("StartingConditionsFromMasks: ");
    Output("Starting initial clusters from distinct float masks \n ");
    if (nStartingClusters <= 1) // If only 1 starting clutser has been requested, assign all the points to cluster 0
    {
        for (int p = 0; p<nPoints; p++)
            Class[p] = 0;
    }
    else
    {
        // count distinct masks (each change in the sorted order marks one)
        int num_masks = 0;
        for (int p = 0; p<nPoints; p++)
            num_masks += (int)SortedMaskChange[p];
        if ((nStartingClusters - 1)>num_masks)
        {
            Error("Not enough masks (%d) to generate starting clusters (%d), "
                "so starting with (%d) clusters instead.\n", (int)num_masks,
                (int)nStartingClusters, (int)(num_masks + 1));
            nClusters2start = num_masks + 1;
            //return;
        }
        else
        {
            nClusters2start = nStartingClusters;
        }
        // Construct the set of all masks: map each point to its mask index
        // and remember one representative point per mask
        vector<bool> MaskUsed;
        vector<int> MaskIndex(nPoints);
        vector<int> MaskPointIndex;
        int current_mask_index = -1;
        for (int q = 0; q<nPoints; q++)
        {
            int p = SortedIndices[q];
            if (q == 0 || SortedMaskChange[p])
            {
                current_mask_index++;
                MaskUsed.push_back(false);
                MaskPointIndex.push_back(p);
            }
            MaskIndex[p] = current_mask_index;
        }
        // Select points at random until we have enough masks
        int masks_found = 0;
        vector<int> MaskIndexToUse;
        vector<int> FoundMaskIndex(num_masks);
        while (masks_found<nClusters2start - 1)
        {
            int p = irand(0, nPoints - 1);
            int mask_index = MaskIndex[p];
            if (!MaskUsed[mask_index])
            {
                MaskIndexToUse.push_back(mask_index);
                MaskUsed[mask_index] = true;
                FoundMaskIndex[mask_index] = masks_found;
                masks_found++;
            }
        }
        // Assign points to clusters based on masks
        for (int p = 0; p<nPoints; p++)
        {
            if (MaskUsed[MaskIndex[p]]) // we included this points mask
                Class[p] = FoundMaskIndex[MaskIndex[p]] + 1; // so assign class to mask index
            else // this points mask not included
            {
                // so find closest match
                int closest_index = 0;
                int distance = nDims + 1;
                vector<int> possibilities; // all seeds tied at the minimum distance
                for (int mi = 0; mi<nClusters2start - 1; mi++)
                {
                    int mip = MaskPointIndex[MaskIndexToUse[mi]];
                    // compute mask distance (Hamming distance over dimensions)
                    int curdistance = 0;
                    for (int i = 0; i<nDims; i++)
                        if (GetMasks(p*nDims + i) != GetMasks(mip*nDims + i))
                            curdistance++;
                    if (curdistance<distance)
                    {
                        possibilities.clear();
                        distance = curdistance;
                    }
                    if (curdistance == distance)
                        possibilities.push_back(mi);
                }
                // tie-break: deterministic first match, or a random one
                if ((MaskStarts > 0) || AssignToFirstClosestMask)
                    closest_index = possibilities[0];
                else
                    closest_index = possibilities[irand(0, possibilities.size() - 1)];
                Class[p] = closest_index + 1;
            }
        }
        // print some info
        Output("Assigned %d initial classes from %d unique masks.\n",
            (int)nClusters2start, (int)num_masks);
        // Dump initial random classes to a file - knowledge of maskstart configuration may be useful
        // TODO: remove this for final version - SNK: actually it is a nice idea to keep this
        char fname[STRLEN];
        FILE *fp;
        sprintf(fname, "%s.initialclusters.%d.clu.%d", FileBase, (int)nClusters2start, (int)ElecNo);
        fp = fopen_safe(fname, "w");
        fprintf(fp, "%d\n", (int)nClusters2start);
        for (int p = 0; p<nPoints; p++)
            fprintf(fp, "%d\n", (int)Class[p]);
        fclose(fp);
    }
    for (int c = 0; c<MaxPossibleClusters; c++)
        ClassAlive[c] = (c<nClusters2start);
    // mirror the initial assignment to the device arrays
    cudaMemcpy(d_Class, &Class[0], nPoints*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ClassAlive, &ClassAlive[0], MaxPossibleClusters* sizeof(int), cudaMemcpyHostToDevice);
}
//======================================CEM step======================================================
// Mark points whose class changed since the previous iteration: writes
// 1.0 for changed points and 0.0 otherwise, so a cublasSasum over the
// output yields the number of reassigned points.
__global__ void updataTempOldClass(int nPoints, int *d_Class, int *d_OldClass, float *d_tempOldClass){
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if (p >= nPoints)
        return;
    d_tempOldClass[p] = (d_Class[p] != d_OldClass[p]) ? 1.0f : 0.0f;
}
// CEM(StartFile) - Does a whole CEM algorithm from a random start or masked start
// whereby clusters are assigned according to the similarity of their masks
// optional start file loads this cluster file to start iteration
// if Recurse is 0, it will not try and split.
// if InitRand is 0, use cluster assignments already in structure
float KK::CEM(char *CluFile, int Recurse, int InitRand,
bool allow_assign_to_noise)
{
int p;
int nChanged;
int Iter;
//vector<int> OldClass(nPoints);
//thrust::device_vector<int> d_OldClass(nPoints);
float Score, OldScore;
int LastStepFull; // stores whether the last step was a full one
int DidSplit = 0;
if (Debug)
{
Output("Entering CEM \n");
}
int time1 = clock();
if (CluFile && *CluFile)
LoadClu(CluFile);
else if (InitRand)
{
// initialize data to random
if ((MaskStarts || UseMaskedInitialConditions) && (UseDistributional) && Recurse)
StartingConditionsFromMasks();//2.5s
}
CopyHostToDevice();
float preComputeTime = (clock() - time1) / (float)CLOCKS_PER_SEC;
Output("==========================preComputeTime is : %f\n", preComputeTime);
//int sizeI = sizeof(int);
//int sizeF = sizeof(float);
//gpuErrchk(cudaMemcpy(d_nClassMembers, &nClassMembers[0], MaxPossibleClusters*sizeI, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_AliveIndex, &AliveIndex[0], MaxPossibleClusters*sizeI, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_ClusterMask, &ClusterMask[0], MaxPossibleClusters*nDims*sizeF, cudaMemcpyHostToDevice));
//gpuErrchk(cudaMemcpy(d_Mean, &Mean[0], MaxPossibleClusters*nDims*sizeF, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_Weight, &Weight[0], MaxPossibleClusters*sizeF, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_LogP, &LogP[0], MaxPossibleClusters*nPoints*sizeF, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_OldClass, &OldClass[0], nPoints*sizeI, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_ClassPenalty, &ClassPenalty[0], MaxPossibleClusters*sizeF, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_Class2, &Class2[0], nPoints*sizeI, cudaMemcpyHostToDevice));//==
//gpuErrchk(cudaMemcpy(d_BestClass, &BestClass[0], nPoints*sizeI, cudaMemcpyHostToDevice));//==
// set all classes to alive
Reindex();
// main loop
Iter = 0;
FullStep = 1;
Score = 0.0;
do {
Output("this is %d cycle...\n", Iter);
//========================
if (useCpu) for (p = 0; p < nPoints; p++) OldClass[p] = Class[p];
else
cudaMemcpy(d_OldClass, d_Class, nPoints*sizeof(int), cudaMemcpyDeviceToDevice);
//========================
// M-step - calculate class weights, means, and covariance matrices for each class
// E-step - calculate scores for each point to belong to each class
int time3 = clock();
MEstep();
Output("==========================MEstep : %f\n", (clock() - time3) / (float)CLOCKS_PER_SEC);
int time4 = clock();
// C-step - choose best class for each
CStep(allow_assign_to_noise);
// Compute class penalties
ComputeClassPenalties();
// Would deleting any classes improve things?
if (Recurse) ConsiderDeletion();
//================================
// Calculate number changed
nChanged = 0;
if(useCpu) for (p = 0; p < nPoints; p++) nChanged += (OldClass[p] != Class[p]);
else {
updataTempOldClass << < (nPoints+BLOCKDIM - 1)/ BLOCKDIM,BLOCKDIM>> > (nPoints,d_Class,d_OldClass,d_tempOldClass);
float nchanged;
cublasSasum(handle, nPoints,d_tempOldClass, 1, &nchanged);
nChanged = (int)nchanged;//reduceInt<128>(d_tempOldClass, nPoints);
}
//===============================
//Compute elapsed time
timesofar = (clock() - Clock0) / (float)CLOCKS_PER_SEC;
//Output("\nTime so far%f seconds.\n", timesofar);
//Write start of Output to klg file
if (Verbose >= 1)
{
if (Recurse == 0) Output("\t\tSP:");
if ((Recurse != 0) || (SplitInfo == 1 && Recurse == 0))
Output("Iteration %d%c (%f sec): %d clusters\n",
(int)Iter, FullStep ? 'F' : 'Q', timesofar, (int)nClustersAlive);
}
// Calculate score
OldScore = Score;
Score = ComputeScore();
Output("==========================other step : %f\n", (clock() - time4) / (float)CLOCKS_PER_SEC);
int time5 = clock();
//Finish Output to klg file with Score already returned via the ComputeScore() function
if (Verbose >= 1)
{
Output(" nChanged %d\n", (int)nChanged);
}
//if(Verbose>=1)
//{
// if(Recurse==0) Output("\t");
// Output(" Iteration %d%c: %d clusters Score %.7g nChanged %d\n",
// (int)Iter, FullStep ? 'F' : 'Q', (int)nClustersAlive, Score, (int)nChanged);
//}
Iter++;
numiterations++;
global_numiterations++;
iteration_metric2 += (float)(nDims*nDims)*(float)(nPoints);
iteration_metric3 += (float)(nDims*nDims)*(float)(nDims*nPoints);
//if (Debug)
//{
// for (p = 0; p<nPoints; p++) BestClass[p] = Class[p];
// SaveOutput();
// Output("Press return");
// getchar();
//}
// Next step a full step?
LastStepFull = FullStep;
FullStep = (
nChanged > ChangedThresh*nPoints
|| nChanged == 0
|| Iter%FullStepEvery == 0
|| Score > OldScore // SNK: Resurrected
//SNK Score decreases ARE because of quick steps!
);
if (Iter > MaxIter)
{
Output("Maximum iterations exceeded\n");
break;
}
//Save a temporary clu file when not splitting
if ((SaveTempCluEveryIter && Recurse) && (OldScore > Score))
{
//SaveTempOutput(); //SNK Saves a temporary Output clu file on each iteration
Output("Writing temp clu file \n");
Output("Because OldScore, %f, is greater than current (better) Score,%f \n ", OldScore, Score);
}
// try splitting
//int mod = (abs(Iter-SplitFirst))%SplitEvery;
//Output("\n Iter mod SplitEvery = %d\n",(int)mod);
//Output("Iter-SplitFirst %d \n",(int)(Iter-SplitFirst));
//if ((Recurse && SplitEvery > 0) && (Iter == SplitFirst || (Iter >= SplitFirst + 1 && (Iter - SplitFirst) % SplitEvery == SplitEvery - 1) || (nChanged == 0 && LastStepFull)))
//{
// if (OldScore > Score) //This should be trivially true for the first run of KlustaKwik
// {
// //SaveTempOutput(); //SNK Saves a temporary Output clu file before each split
// Output("Writing temp clu file \n");
// Output("Because OldScore, %f, is greater than current (better) Score,%f \n ", OldScore, Score);
// }
// DidSplit = TrySplits();
//}
//else DidSplit = 0;
//Output("==========================trysplit : %f\n", (clock() - time5) / (float)CLOCKS_PER_SEC);
} while (nChanged > 0 || !LastStepFull || DidSplit);
if (DistDump) fprintf(Distfp, "\n");
return Score;
}
// does the two-step clustering algorithm:
// first make a subset of the data, to SubPoints points
// then run CEM on this
// then use these clusters to do a CEM on the full data
// It calls CEM whenever there is no initialization clu file (i.e. the most common usage)
float KK::Cluster(char *StartCluFile = NULL)
{
if (Debug)
{
Output("Entering Cluster \n");
}
if (Subset <= 1)
{ // don't subset
Output("------ Clustering full data set of %d points ------\n", (int)nPoints);
return CEM(NULL, 1, 1);
}
//// otherwise run on a subset of points
//int sPoints = nPoints / Subset; // number of subset points - int division will round down
//vector<int> SubsetIndices(sPoints);
//for (int i = 0; i<sPoints; i++)
// // choose point to include, evenly spaced plus a random offset
// SubsetIndices[i] = Subset*i + irand(0, Subset - 1);
//KK KKSub = KK(*this, SubsetIndices);
//// run CEM algorithm on KKSub
//Output("------ Running on subset of %d points ------\n", (int)sPoints);
//KKSub.CEM(NULL, 1, 1);
//// now copy cluster shapes from KKSub to main KK
//Weight = KKSub.Weight;
//Mean = KKSub.Mean;
//Cov = KKSub.Cov;
//DynamicCov = KKSub.DynamicCov;
//ClassAlive = KKSub.ClassAlive;
//nClustersAlive = KKSub.nClustersAlive;
//AliveIndex = KKSub.AliveIndex;
//// Run E and C steps on full data set
//Output("------ Evaluating fit on full set of %d points ------\n", (int)nPoints);
//if (UseDistributional)
// ComputeClusterMasks(); // needed by E-step normally computed by M-step
////EStep();
//CStep();
//// compute score on full data set and leave
//return ComputeScore();
}
// Initialise by loading data from files
KK::KK(char *FileBase, int ElecNo, char *UseFeatures,
float PenaltyK, float PenaltyKLogN, int PriorPoint)
{
cublasCreate(&handle);
cusolverDnCreate(&solver_handle);
side = CUBLAS_SIDE_LEFT;
uplo = CUBLAS_FILL_MODE_UPPER;
trans = CUBLAS_OP_T;
diag = CUBLAS_DIAG_NON_UNIT;
KK_split = NULL;
K2_container = NULL;
penaltyK = PenaltyK;
penaltyKLogN = PenaltyKLogN;
LoadData(FileBase, ElecNo, UseFeatures);
priorPoint = PriorPoint;
//NOTE: penaltyK, penaltyKlogN, priorPoint, lower case versions of global variable PenaltyK PenaltyKLogN and PriorPoint
DoInitialPrecomputations();//Now DoPrecomputations is only invoked in the initialization
numiterations = 0;
init_type = 0;
}
// This function is used by both of the constructors below, it initialises
// the data from a source KK object with a subset of the indices.
//used trysplit() --
void KK::ConstructFrom(const KK &Source, const vector<int> &Indices)
{
KK_split = NULL;
K2_container = NULL;
nDims = Source.nDims;
nDims2 = nDims*nDims;
nPoints = Indices.size();
penaltyK = Source.penaltyK;
penaltyKLogN = Source.penaltyKLogN;
priorPoint = Source.priorPoint;
nStartingClusters = Source.nStartingClusters;
NoisePoint = Source.NoisePoint;
FullStep = Source.FullStep;
nClustersAlive = Source.nClustersAlive;
numiterations = Source.numiterations;
//define cublas and cusolver handle
handle = Source.handle;
solver_handle = Source.solver_handle;
side = Source.side;
uplo = Source.uplo;
trans = Source.trans;
diag = Source.diag;
AllocateArrays(); // ===========================Set storage for all the arrays such as Data, FloatMasks, Weight, Mean, Cov, etc.
if (Debug)
{
Output("Entering ConstructFrom: \n");
}
// fill with a subset of points
for (int p = 0; p<nPoints; p++)
{
int psource = Indices[p];
//copy data and masks
for (int d = 0; d<nDims; d++)
Data[p*nDims + d] = Source.Data[psource*nDims + d];
if (Source.Masks.size()>0)
{
for (int d = 0; d<nDims; d++)
Masks[p*nDims + d] = Source.Masks[psource*nDims + d];
}
if (UseDistributional)
{
for (int d = 0; d<nDims; d++)
{
FloatMasks[p*nDims + d] = Source.FloatMasks[psource*nDims + d];
}
}
UnMaskDims[p] = Source.UnMaskDims[psource];
}
//Output(" Printing Source.NoiseVariance[2] = %f",Source.NoiseVariance[2]);
if (UseDistributional)
{
NoiseMean.resize(nDims);
NoiseVariance.resize(nDims);
nMasked.resize(nDims);
for (int d = 0; d<nDims; d++)
{
NoiseMean[d] = Source.NoiseMean[d];
NoiseVariance[d] = Source.NoiseVariance[d];
nMasked[d] = Source.nMasked[d];
}
}
DoPrecomputations();
//Output(" Printing Source.NoiseMean[2] = %f",NoiseVariance[2]);
numiterations = 0;
}
void KK::ConstructFrom(const KK &Source)
{
vector<int> Indices(Source.nPoints);
for (int i = 0; i<Source.nPoints; i++)
Indices[i] = i;
ConstructFrom(Source, Indices);
}
KK::KK(const KK &Source, const vector<int> &Indices)
{
ConstructFrom(Source, Indices);
init_type = 2;
}
// If we don't specify an index subset, use everything.
//invoke in the trysplit step
KK::KK(const KK &Source)
{
ConstructFrom(Source);
init_type = 1;
}
KK::~KK()
{
if (KK_split) delete KK_split;
KK_split = NULL;
if (K2_container) delete K2_container;
K2_container = NULL;
//cublasDestroy(handle);
//cusolverDnDestroy(solver_handle);
}
// Main loop
//int main(int argc, char **argv)
int main()
{
float Score;
float BestScore = HugeScore;
int p, i;
char fname[STRLEN];
if (Log) {
sprintf(fname, "%s.klg.%d", FileBase, (int)ElecNo);
logfp = fopen_safe(fname, "w");
}
//SetupParams((int)argc, argv); // This function is defined in parameters.cpp
//getchar();
Output("Starting KlustaKwik. Version: %s\n", VERSION);
//if (RamLimitGB == 0.0)
//{
// RamLimitGB = (1.0*total_physical_memory()) / (1024.0*1024.0*1024.0);
// Output("Setting RAM limit to total physical memory, %.2f GB.\n", (double)RamLimitGB);
//}
//else if (RamLimitGB < 0.0)
//{
// RamLimitGB = 1e20;
// Output("WARNING: You have chosen not to set a RAM limit, this may cause problems.\n");
//}
//clock_t Clock0 = clock();
Clock0 = clock();
clock_t etime = 0;
clock_t mtime = 0;
#ifdef _OPENMP
double start_time = omp_get_wtime();
#endif
// The main KK object, loads the data and does some precomputations
KK K1(FileBase, ElecNo, UseFeatures, PenaltyK, PenaltyKLogN, PriorPoint);
int constructor = (clock() - Clock0) / (float)CLOCKS_PER_SEC;
Output("Time taken for constructor:%f seconds.\n", constructor);
if (UseDistributional && SaveSorted) //Bug fix (Classical KK would terminate here)
K1.SaveSortedData();
Output("\nFileBase : %s\n ----------------------------------------------------------------\n", FileBase);
// Seed random number generator
srand((unsigned int)RandomSeed);
// open distance dump file if required
if (DistDump) Distfp = fopen("DISTDUMP", "w");
// start with provided file, if required
if (*StartCluFile)
{
Output("\nStarting from cluster file %s\n", StartCluFile);
float iterationtime = (float)clock();
BestScore = K1.CEM(StartCluFile, 1, 1); //Main computation
iterationtime = (clock() - iterationtime) / (float)CLOCKS_PER_SEC;
Output("Time taken for this iteration:%f seconds.\n", iterationtime);
Output(" %d->%d Clusters: Score %f\n\n", (int)K1.nStartingClusters, (int)K1.nClustersAlive, BestScore);
for (p = 0; p<K1.nPoints; p++)
K1.BestClass[p] = K1.Class[p];
K1.SaveOutput();
}
else
{
// loop through numbers of clusters ...
K1.nStartingClusters = MaskStarts;
// do CEM iteration
Output("\nStarting from %d clusters...\n", (int)K1.nStartingClusters);
float iterationtime = (float)clock();
Score = K1.Cluster(); //Main computation
iterationtime = (clock() - iterationtime) / (float)CLOCKS_PER_SEC;
Output("Time taken for this iteration:%f seconds.\n", iterationtime);
Output(" %d->%d Clusters: Score %f, best is %f\n", (int)K1.nStartingClusters, (int)K1.nClustersAlive, Score, BestScore);
if (Score < BestScore)
{
Output("THE BEST YET!\n"); // New best classification found
BestScore = Score;
cudaMemcpy(&K1.BestClass[0], K1.d_Class, K1.nPoints * sizeof(int), cudaMemcpyDeviceToHost);
//for (p = 0; p < K1.nPoints; p++)
//K1.BestClass[p] = K1.Class[p];
K1.SaveOutput();
}
Output("\n");
}
K1.SaveOutput();
K1.FreeArray();
cudaDeviceReset();
#ifdef _OPENMP
float tottime = omp_get_wtime() - start_time;
#else
float tottime = (clock() - Clock0) / (float)CLOCKS_PER_SEC;
#endif
Output("E step: %d (time per iteration =%f ms)\n",
(int)K1.numiterations,
1e3*etime / (float)CLOCKS_PER_SEC / K1.numiterations);
Output("M step: %d (time per iteration =%f ms)\n",
(int)K1.numiterations,
1e3*mtime / (float)CLOCKS_PER_SEC / K1.numiterations);
Output("Main iterations: %d (time per iteration =%f ms)\n",
(int)K1.numiterations,
1e3*tottime / K1.numiterations);
Output("Total iterations: %d (time per iteration =%f ms)\n",
(int)global_numiterations,
1e3*tottime / global_numiterations);
Output("\nDef. Iteration metric 2:\nIteration_metric2 += (float)(nDims*nDims)*(float)(nPoints)\n");
Output("Iterations metric 2: %f (time per metric unit =%fns)\n",
iteration_metric2,
1e9*tottime / iteration_metric2);
Output("\nDef. Iteration metric 3:\nIteration_metric3 += (float)(nDims*nDims)*(float)(nDims*nPoints)\n");
Output("Iterations metric 3: %f (time per metric unit=%fps)\n",
iteration_metric3,
1e12*tottime / iteration_metric3);
Output("\nThat took %f seconds.\n", tottime);
if (DistDump) fclose(Distfp);
//Output("maxsize = %d\n", maxsize);
//getchar();
return 0;
}
|
ec0c61aae1b11eeb2f96c655388e9df444e62eff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "PCA.cuh"
namespace gtom
{
__global__ void PCAReconstructKernel(tfloat* d_eigenvectors, tfloat* d_eigenvalues, uint length, uint samples, uint ncomponents, tfloat* d_output);
void d_PCAFilter(tfloat* d_input, int length, int samples, int ncomponents, tfloat* d_filtered)
{
// Center the data
tfloat* d_mean;
hipMalloc((void**)&d_mean, length * sizeof(tfloat));
d_ReduceMean(d_input, d_mean, length, samples);
// PCA
tfloat* d_eigenvectors;
hipMalloc((void**)&d_eigenvectors, length * ncomponents * sizeof(tfloat));
tfloat* d_eigenvalues;
hipMalloc((void**)&d_eigenvalues, samples * ncomponents * sizeof(tfloat));
tfloat* d_residuals;
hipMalloc((void**)&d_residuals, samples * length * sizeof(tfloat));
d_PCANIPALS(d_input, samples, length, ncomponents, d_eigenvalues, d_eigenvectors, d_residuals);
// Reconstruct and add previously subtracted mean
d_PCAReconstruct(d_eigenvectors, d_eigenvalues, length, samples, ncomponents, d_filtered);
d_AddVector(d_filtered, d_mean, d_filtered, length, samples);
// Clean up
hipFree(d_residuals);
hipFree(d_eigenvalues);
hipFree(d_eigenvectors);
hipFree(d_mean);
}
void d_PCAReconstruct(tfloat* d_eigenvectors, tfloat* d_eigenvalues, int length, int samples, int ncomponents, tfloat* d_output)
{
dim3 TpB = dim3(min(192, NextMultipleOf(length, 32)));
dim3 grid = dim3(min(32768, samples));
PCAReconstructKernel << <grid, TpB >> > (d_eigenvectors, d_eigenvalues, length, samples, ncomponents, d_output);
}
__global__ void PCAReconstructKernel(tfloat* d_eigenvectors, tfloat* d_eigenvalues, uint length, uint samples, uint ncomponents, tfloat* d_output)
{
for (uint sample = blockIdx.x; sample < samples; sample += gridDim.x)
{
for (uint element = threadIdx.x; element < length; element += blockDim.x)
{
tfloat sum = 0;
for (uint component = 0; component < ncomponents; component++)
{
tfloat vectorelement = d_eigenvectors[component * length + element];
tfloat value = d_eigenvalues[sample * ncomponents + component];
sum += vectorelement * value;
}
d_output[sample * length + element] = sum;
}
}
}
} | ec0c61aae1b11eeb2f96c655388e9df444e62eff.cu | #include "Prerequisites.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "PCA.cuh"
namespace gtom
{
__global__ void PCAReconstructKernel(tfloat* d_eigenvectors, tfloat* d_eigenvalues, uint length, uint samples, uint ncomponents, tfloat* d_output);
void d_PCAFilter(tfloat* d_input, int length, int samples, int ncomponents, tfloat* d_filtered)
{
// Center the data
tfloat* d_mean;
cudaMalloc((void**)&d_mean, length * sizeof(tfloat));
d_ReduceMean(d_input, d_mean, length, samples);
// PCA
tfloat* d_eigenvectors;
cudaMalloc((void**)&d_eigenvectors, length * ncomponents * sizeof(tfloat));
tfloat* d_eigenvalues;
cudaMalloc((void**)&d_eigenvalues, samples * ncomponents * sizeof(tfloat));
tfloat* d_residuals;
cudaMalloc((void**)&d_residuals, samples * length * sizeof(tfloat));
d_PCANIPALS(d_input, samples, length, ncomponents, d_eigenvalues, d_eigenvectors, d_residuals);
// Reconstruct and add previously subtracted mean
d_PCAReconstruct(d_eigenvectors, d_eigenvalues, length, samples, ncomponents, d_filtered);
d_AddVector(d_filtered, d_mean, d_filtered, length, samples);
// Clean up
cudaFree(d_residuals);
cudaFree(d_eigenvalues);
cudaFree(d_eigenvectors);
cudaFree(d_mean);
}
void d_PCAReconstruct(tfloat* d_eigenvectors, tfloat* d_eigenvalues, int length, int samples, int ncomponents, tfloat* d_output)
{
dim3 TpB = dim3(min(192, NextMultipleOf(length, 32)));
dim3 grid = dim3(min(32768, samples));
PCAReconstructKernel << <grid, TpB >> > (d_eigenvectors, d_eigenvalues, length, samples, ncomponents, d_output);
}
__global__ void PCAReconstructKernel(tfloat* d_eigenvectors, tfloat* d_eigenvalues, uint length, uint samples, uint ncomponents, tfloat* d_output)
{
for (uint sample = blockIdx.x; sample < samples; sample += gridDim.x)
{
for (uint element = threadIdx.x; element < length; element += blockDim.x)
{
tfloat sum = 0;
for (uint component = 0; component < ncomponents; component++)
{
tfloat vectorelement = d_eigenvectors[component * length + element];
tfloat value = d_eigenvalues[sample * ncomponents + component];
sum += vectorelement * value;
}
d_output[sample * length + element] = sum;
}
}
}
} |
55ebd7ec7eb83c012b823e2061a443a07890f5be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/adagrad_op.h"
namespace caffe2 {
__global__ void AdagradUpdate(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
float weight_decay = 0.f) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i] + weight_decay * w[i];
float hi = nh[i] = decay * h[i] + gi * gi;
nw[i] = w[i] + lr[0] * gi / (sqrtf(hi) + epsilon);
}
}
template <>
void adagrad_update<CUDAContext>(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
CUDAContext* context,
float weight_decay) {
hipLaunchKernelGGL(( AdagradUpdate),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
N, w, g, h, nw, nh, epsilon, decay, lr, weight_decay);
}
template <typename SIndex, typename THalf>
__global__ void SparseAdagradKernel(
const size_t N,
const size_t grad_slice_sz,
const float epsilon,
THalf* param,
THalf* param_mom,
const SIndex* indices,
const float* grad,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
CUDA_1D_KERNEL_LOOP(i, N) {
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
float gi = grad[gradIdx] + weight_decay * param[paramIdx];
float mom_new = gi * gi + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
}
/**
* Calculate RowwiseSparseAdagrad
* M: gradients.dims[0]
* N: gradients.size_from_dim(1)
* grad: pointer to the gradients
* param: pointer to weights
* param_mom: pointer to the momentum
* indices: keys
*/
template <typename SIndex>
__global__ void RowWiseSparseAdagradKernel(
const int M,
const int N,
const float epsilon,
float* param,
float* param_mom,
const SIndex* indices,
const float* grad,
const float* lr,
float weight_decay = 0.f) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(N, CAFFE_CUDA_NUM_THREADS);
// in case gridDim is smaller than M
for (int i = blockIdx.x; i < M; i += gridDim.x) {
const SIndex index = indices[i];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
// in case N is bigger than block size which is 512 by default
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j] + weight_decay * param[index * N + j];
sum_squares += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_result / (float)N;
param_mom[index] += row_sum_squares_avg;
}
__syncthreads();
// update param
float step = lr[0] / (sqrtf(param_mom[index]) + epsilon);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j] + weight_decay * param[index * N + j];
param[index * N + j] = param[index * N + j] + x_ij * step;
}
}
}
template <typename T, class Context>
class CUDASparseAdagradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDASparseAdagradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
LOG(INFO) << "gradient optimization operator in use: "
<< "CUDASparseAdagradOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename THalf>
bool DoRunWithType2() {
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const auto* gradIn = Input(GRAD).template data<T>();
const auto* paramIn = Input(PARAM).template data<THalf>();
const auto* momentIn = Input(MOMENT_1).template data<THalf>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<THalf>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<THalf>();
auto N = Input(GRAD).size();
auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim());
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
hipLaunchKernelGGL(( SparseAdagradKernel<IndexType, THalf>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
grad_slice_sz,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<THalf>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<THalf>(),
Input(INDICES).template data<IndexType>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>(),
weight_decay_);
return true;
}
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <>
template <typename SIndex>
bool RowWiseSparseAdagradOp<CUDAContext>::DoRunWithType() {
auto N = Input(GRAD).size();
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
// size of the 1st dimension of the input gradient
auto GRAD_M = Input(GRAD).dim32(0);
auto GRAD_N = N / GRAD_M;
// Cases with GRAND_N < 128 can have more swarms if number of threads is lower
int num_threads = CAFFE_CUDA_NUM_THREADS;
if (GRAD_N < num_threads) {
num_threads = GRAD_N;
}
// each thread block will handle multiple rows of the input and output
hipLaunchKernelGGL(( RowWiseSparseAdagradKernel),
dim3(::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(num_threads),
0,
context_.cuda_stream(),
GRAD_M,
GRAD_N,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>(),
weight_decay_);
return true;
}
REGISTER_CUDA_OPERATOR(Adagrad, AdagradOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SparseAdagrad, CUDASparseAdagradOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagrad,
RowWiseSparseAdagradOp<CUDAContext>);
} // namespace caffe2
| 55ebd7ec7eb83c012b823e2061a443a07890f5be.cu | #include <algorithm>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/adagrad_op.h"
namespace caffe2 {
__global__ void AdagradUpdate(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
float weight_decay = 0.f) {
CUDA_1D_KERNEL_LOOP(i, N) {
float gi = g[i] + weight_decay * w[i];
float hi = nh[i] = decay * h[i] + gi * gi;
nw[i] = w[i] + lr[0] * gi / (sqrtf(hi) + epsilon);
}
}
template <>
void adagrad_update<CUDAContext>(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
CUDAContext* context,
float weight_decay) {
AdagradUpdate<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
N, w, g, h, nw, nh, epsilon, decay, lr, weight_decay);
}
template <typename SIndex, typename THalf>
__global__ void SparseAdagradKernel(
const size_t N,
const size_t grad_slice_sz,
const float epsilon,
THalf* param,
THalf* param_mom,
const SIndex* indices,
const float* grad,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
CUDA_1D_KERNEL_LOOP(i, N) {
const size_t gradIdx = i;
const SIndex index = indices[i / grad_slice_sz];
const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz);
float gi = grad[gradIdx] + weight_decay * param[paramIdx];
float mom_new = gi * gi + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
}
/**
* Calculate RowwiseSparseAdagrad
* M: gradients.dims[0]
* N: gradients.size_from_dim(1)
* grad: pointer to the gradients
* param: pointer to weights
* param_mom: pointer to the momentum
* indices: keys
*/
template <typename SIndex>
__global__ void RowWiseSparseAdagradKernel(
const int M,
const int N,
const float epsilon,
float* param,
float* param_mom,
const SIndex* indices,
const float* grad,
const float* lr,
float weight_decay = 0.f) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(N, CAFFE_CUDA_NUM_THREADS);
// in case gridDim is smaller than M
for (int i = blockIdx.x; i < M; i += gridDim.x) {
const SIndex index = indices[i];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
// in case N is bigger than block size which is 512 by default
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j] + weight_decay * param[index * N + j];
sum_squares += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_result / (float)N;
param_mom[index] += row_sum_squares_avg;
}
__syncthreads();
// update param
float step = lr[0] / (sqrtf(param_mom[index]) + epsilon);
for (int j = threadIdx.x; j < N; j += blockDim.x) {
const float x_ij = grad[i * N + j] + weight_decay * param[index * N + j];
param[index * N + j] = param[index * N + j] + x_ij * step;
}
}
}
template <typename T, class Context>
class CUDASparseAdagradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDASparseAdagradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
LOG(INFO) << "gradient optimization operator in use: "
<< "CUDASparseAdagradOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename THalf>
bool DoRunWithType2() {
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const auto* gradIn = Input(GRAD).template data<T>();
const auto* paramIn = Input(PARAM).template data<THalf>();
const auto* momentIn = Input(MOMENT_1).template data<THalf>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<THalf>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<THalf>();
auto N = Input(GRAD).size();
auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim());
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
SparseAdagradKernel<IndexType, THalf>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
grad_slice_sz,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<THalf>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<THalf>(),
Input(INDICES).template data<IndexType>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>(),
weight_decay_);
return true;
}
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <>
template <typename SIndex>
bool RowWiseSparseAdagradOp<CUDAContext>::DoRunWithType() {
auto N = Input(GRAD).size();
if (N == 0) {
// empty grad, nothing to do here, not even launching the kernel
return true;
}
// size of the 1st dimension of the input gradient
auto GRAD_M = Input(GRAD).dim32(0);
auto GRAD_N = N / GRAD_M;
// Cases with GRAND_N < 128 can have more swarms if number of threads is lower
int num_threads = CAFFE_CUDA_NUM_THREADS;
if (GRAD_N < num_threads) {
num_threads = GRAD_N;
}
// each thread block will handle multiple rows of the input and output
RowWiseSparseAdagradKernel<<<
std::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS),
num_threads,
0,
context_.cuda_stream()>>>(
GRAD_M,
GRAD_N,
epsilon_,
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Input(INDICES).template data<SIndex>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>(),
weight_decay_);
return true;
}
REGISTER_CUDA_OPERATOR(Adagrad, AdagradOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SparseAdagrad, CUDASparseAdagradOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagrad,
RowWiseSparseAdagradOp<CUDAContext>);
} // namespace caffe2
|
b43390e2ea4cc558315191282c242f418ee6e0f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ========== ========== ========== ========= */
// Breadth First Search (BFS) //
// Algorithm in CUDA //
/* ========== ========== ========== ========== */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "helpers.cuh"
#include <assert.h>
struct Edge {
int vertex;
struct Edge * next;
};
struct Edge ** adjacencyList;
// Size is made (vertices + 1) to use the
// array as 1-indexed, for simplicity
/*All global variables comes here**/
int * parent; //int parent[vertices + 1];
// Each element holds the Node value of its parent
int * level; int * startVertices;
int * inputsCircuits;
int * lev;
int *flag;
int startArrayCount; //int level[vertices + 1];
// Each element holds the Level value of that node
//define variables in unified memory
int * vertices;
int * edges;
int v1,v2,i;
// Level-expansion step of the BFS: visits every vertex i whose current
// level equals 'lev', assigns level lev+1 and parent to each unvisited
// neighbor, and recurses over the remaining vertex indices.
//
// NOTE(review): several defects in this routine --
//  * 'flag = 1' assigns to the file-scope pointer 'int *flag', not to the
//    managed value (*flag); device code also cannot legally access that
//    host-scope global at all (it is neither __device__ nor __managed__).
//  * '(level[i] == lev)&(i<vertices)' uses bitwise & and reads level[i]
//    BEFORE the bounds test, so i >= vertices reads out of bounds; should
//    be '(i < vertices) && (level[i] == lev)'.
//  * recursion depth is O(vertices), far beyond the default device stack
//    size for nontrivial graphs.
__device__ void recursiveTraverse(int lev, struct Edge * adjacencyList[],int level[],int parent[],int i,int vertices){
int par;
struct Edge * traverse;
if ((level[i] == lev)&(i<vertices)) {
flag = 1;
traverse = adjacencyList[i];
par = i;
printf("%d \n",par);
while (traverse != NULL) {
if (level[traverse->vertex] != -1) {
// neighbor already visited -- skip it
traverse = traverse->next;
continue;
}
level[traverse->vertex] = lev + 1;
printf("%d ",level[traverse->vertex]);
parent[traverse->vertex] = par;
traverse = traverse->next;
++i;
recursiveTraverse(lev,adjacencyList,level,parent,i,vertices);
}
}else if(i<vertices){
++i;
recursiveTraverse(lev,adjacencyList,level,parent,i,vertices);
}
}
// Inserts Node to the Linked List by Head Insertion - O(1)
// Returns address of head which is the newly created node.
// Inserts a node into the adjacency linked list by head insertion - O(1).
//
// @currentHead - current first node of the list (may be NULL)
// @newVertex   - vertex id stored in the new head node
// @return      - address of the new head node
//
// Fix: the malloc result was dereferenced without a NULL check; on
// allocation failure we now abort with a message instead of crashing
// on a NULL dereference.
struct Edge * AddEdge(struct Edge * currentHead, int newVertex)
{
struct Edge * newHead
= (struct Edge *) malloc(sizeof(struct Edge));
if (newHead == NULL) {
fprintf(stderr, "AddEdge: out of memory\n");
exit(EXIT_FAILURE);
}
newHead->vertex = newVertex;
newHead->next = currentHead;
return newHead;
}
// Top-level BFS driver kernel: seeds level 0 with the requested start
// vertices, then repeatedly launches recursiveTraverse (dynamic
// parallelism) once per level until no vertex was expanded.
//
// NOTE(review):
//  * 'lev' and 'flag' are host-scope 'int *' globals used here as if they
//    were plain ints ('lev=0', 'while (flag)', 'flag = 0'); device code can
//    neither assign integers to those pointers nor access host globals --
//    they would need to be __managed__ (or kernel parameters) and
//    dereferenced.
//  * the child kernel's completion is never waited on before the next
//    level starts, so levels can overlap.
//  * stream s1 is created but never destroyed.
__global__ void BreadthFirstSearch(
struct Edge * adjacencyList[],
int vertices,
int parent[],
int level[],
int startVertices[],int count
){
int i;
// 'lev' represents the level to be assigned
// 'par' represents the parent to be assigned
// 'flag' used to indicate if graph is exhausted
hipStream_t s1;
hipStreamCreateWithFlags(&s1,hipStreamNonBlocking);
lev=0;
// Seed level 0 with every requested start vertex
for(i=0;i<count;i++){
int k =startVertices[i];
level[k] = lev;
}
// We start at startVertex
while (flag) {
flag = 0;
hipLaunchKernelGGL(( recursiveTraverse), dim3(1),dim3(1),0,s1, lev,adjacencyList,level,parent,0,vertices);
++lev;
}
}
// Host driver: reads a graph in coordinate form from graph.txt and a list
// of start vertices from input.txt, builds the adjacency list in managed
// memory, runs the single-thread BFS kernel, and prints the resulting
// level/parent arrays with a kernel timing.
//
// NOTE(review): this function has multiple compile/runtime defects --
//  * 'vertices = noOfRows;' and 'edges = NNZ;' assign ints to the
//    file-scope 'int *' globals, and every later 'i < vertices' compares
//    an int against that pointer.
//  * 'hipMallocManaged(&vertice, ...)' and '(&edge, ...)' reference
//    undeclared identifiers (typos for &vertices / &edges); the managed
//    allocations for vertices/edges also happen AFTER those pointers were
//    already used as allocation sizes above.
//  * 'flag=1;' sets the pointer, not the managed value (*flag).
//  * the final print loop runs i = 1..vertices inclusive and reads
//    level[vertices]/parent[vertices], one past the allocation.
//  * graphFile/vectorFile are never fclosed and the hip events are never
//    destroyed; fscanf return values are unchecked.
int main()
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
hipSetDevice(0);
int NNZ;
int noOfRows,noOfCols;
FILE * graphFile =fopen("graph.txt","r");
fscanf(graphFile, "%d %d %d",&noOfRows, &noOfCols, &NNZ);
printf("No fo rows %d, No of Cols %d, nnz %d \n",noOfRows,noOfCols,NNZ); //- done
vertices = noOfRows;
edges =NNZ;
hipMallocManaged(&parent,vertices*sizeof(int));
hipMallocManaged(&level,vertices*sizeof(int));
hipMallocManaged(&adjacencyList,vertices*sizeof(struct Edge *));
hipMallocManaged(&vertice, 1*sizeof(int));
hipMallocManaged(&edge, 1*sizeof(int));
hipMallocManaged(&lev, 1*sizeof(int));
hipMallocManaged(&flag, 1*sizeof(int));
flag=1;
// Must initialize your array
for (i = 0; i < vertices; ++i) {
adjacencyList[i] = NULL;
parent[i] = 0;
level[i] = -1;
}
// Read the edge list; third column (weight) is read and discarded
for (i = 0; i < edges; ++i) {
int val;
fscanf(graphFile, "%d %d %d",&v1, &v2, &val);
// Adding edge v1 --> v2
adjacencyList[v1] = AddEdge(adjacencyList[v1], v2);
// Adding edge v2 --> v1
// Remove this if you want a Directed Graph
// adjacencyList[v2] = AddEdge(adjacencyList[v2], v1);
}
// Printing Adjacency List
printf("\nAdjacency List - of graph \n\n");
for (i = 0; i < vertices; ++i) {
printf("adjacencyList[%d] -> ", i);
struct Edge * traverse = adjacencyList[i];
while (traverse != NULL) {
printf("%d -> ", traverse->vertex);
traverse = traverse->next;
}
printf("NULL\n");
}
printf("geting starting list of inputs:\n");
FILE * vectorFile= fopen("input.txt","r");
fscanf(vectorFile,"%d",&startArrayCount);
// int inputsCircuits[startArrayCount];
hipMallocManaged(&inputsCircuits,startArrayCount*sizeof(int));
for(i=0;i<startArrayCount;i++){
int tempVal;
fscanf(vectorFile,"%d",&tempVal);
inputsCircuits[i]= tempVal;
printf("%d ,",inputsCircuits[i]);
}
printf("\n");
// Time the BFS kernel with hip events
hipEvent_t start,stop;
float elapsedtime;
hipEventCreate(&start);
hipEventRecord(start,0);
hipLaunchKernelGGL(( BreadthFirstSearch), dim3(1),dim3(1), 0, 0, adjacencyList, vertices, parent, level, inputsCircuits ,startArrayCount);
hipDeviceSynchronize();
checkCudaError();
//stop Time measurement
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedtime,start,stop);
fprintf(stderr,"Time spent for kernel : %.10f seconds\n",elapsedtime/(float)1000);
// Printing Level and Parent Arrays
printf("\nLevel and Parent Arrays -\n");
for (i = 1; i <= vertices; ++i) {
printf("Level of Vertex %d is %d, Parent is %d\n",
i, level[i], parent[i]);
}
return 0;
}
| b43390e2ea4cc558315191282c242f418ee6e0f9.cu | /* ========== ========== ========== ========= */
// Breadth First Search (BFS) //
// Algorithm in CUDA //
/* ========== ========== ========== ========== */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "helpers.cuh"
#include <assert.h>
struct Edge {
int vertex;
struct Edge * next;
};
struct Edge ** adjacencyList;
// Size is made (vertices + 1) to use the
// array as 1-indexed, for simplicity
/*All global variables comes here**/
int * parent; //int parent[vertices + 1];
// Each element holds the Node value of its parent
int * level; int * startVertices;
int * inputsCircuits;
int * lev;
int *flag;
int startArrayCount; //int level[vertices + 1];
// Each element holds the Level value of that node
//define variables in unified memory
int * vertices;
int * edges;
int v1,v2,i;
__device__ void recursiveTraverse(int lev, struct Edge * adjacencyList[],int level[],int parent[],int i,int vertices){
int par;
struct Edge * traverse;
if ((level[i] == lev)&(i<vertices)) {
flag = 1;
traverse = adjacencyList[i];
par = i;
printf("%d \n",par);
while (traverse != NULL) {
if (level[traverse->vertex] != -1) {
traverse = traverse->next;
continue;
}
level[traverse->vertex] = lev + 1;
printf("%d ",level[traverse->vertex]);
parent[traverse->vertex] = par;
traverse = traverse->next;
++i;
recursiveTraverse(lev,adjacencyList,level,parent,i,vertices);
}
}else if(i<vertices){
++i;
recursiveTraverse(lev,adjacencyList,level,parent,i,vertices);
}
}
// Inserts Node to the Linked List by Head Insertion - O(1)
// Returns address of head which is the newly created node.
struct Edge * AddEdge(struct Edge * currentHead, int newVertex)
{
struct Edge * newHead
= (struct Edge *) malloc(sizeof(struct Edge));
newHead->vertex = newVertex;
newHead->next = currentHead;
return newHead;
}
__global__ void BreadthFirstSearch(
struct Edge * adjacencyList[],
int vertices,
int parent[],
int level[],
int startVertices[],int count
){
int i;
// 'lev' represents the level to be assigned
// 'par' represents the parent to be assigned
// 'flag' used to indicate if graph is exhausted
cudaStream_t s1;
cudaStreamCreateWithFlags(&s1,cudaStreamNonBlocking);
lev=0;
for(i=0;i<count;i++){
int k =startVertices[i];
level[k] = lev;
}
// We start at startVertex
while (flag) {
flag = 0;
recursiveTraverse<<<1,1,0,s1>>>(lev,adjacencyList,level,parent,0,vertices);
++lev;
}
}
int main()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
cudaSetDevice(0);
int NNZ;
int noOfRows,noOfCols;
FILE * graphFile =fopen("graph.txt","r");
fscanf(graphFile, "%d %d %d",&noOfRows, &noOfCols, &NNZ);
printf("No fo rows %d, No of Cols %d, nnz %d \n",noOfRows,noOfCols,NNZ); //- done
vertices = noOfRows;
edges =NNZ;
cudaMallocManaged(&parent,vertices*sizeof(int));
cudaMallocManaged(&level,vertices*sizeof(int));
cudaMallocManaged(&adjacencyList,vertices*sizeof(struct Edge *));
cudaMallocManaged(&vertice, 1*sizeof(int));
cudaMallocManaged(&edge, 1*sizeof(int));
cudaMallocManaged(&lev, 1*sizeof(int));
cudaMallocManaged(&flag, 1*sizeof(int));
flag=1;
// Must initialize your array
for (i = 0; i < vertices; ++i) {
adjacencyList[i] = NULL;
parent[i] = 0;
level[i] = -1;
}
for (i = 0; i < edges; ++i) {
int val;
fscanf(graphFile, "%d %d %d",&v1, &v2, &val);
// Adding edge v1 --> v2
adjacencyList[v1] = AddEdge(adjacencyList[v1], v2);
// Adding edge v2 --> v1
// Remove this if you want a Directed Graph
// adjacencyList[v2] = AddEdge(adjacencyList[v2], v1);
}
// Printing Adjacency List
printf("\nAdjacency List - of graph \n\n");
for (i = 0; i < vertices; ++i) {
printf("adjacencyList[%d] -> ", i);
struct Edge * traverse = adjacencyList[i];
while (traverse != NULL) {
printf("%d -> ", traverse->vertex);
traverse = traverse->next;
}
printf("NULL\n");
}
printf("geting starting list of inputs:\n");
FILE * vectorFile= fopen("input.txt","r");
fscanf(vectorFile,"%d",&startArrayCount);
// int inputsCircuits[startArrayCount];
cudaMallocManaged(&inputsCircuits,startArrayCount*sizeof(int));
for(i=0;i<startArrayCount;i++){
int tempVal;
fscanf(vectorFile,"%d",&tempVal);
inputsCircuits[i]= tempVal;
printf("%d ,",inputsCircuits[i]);
}
printf("\n");
cudaEvent_t start,stop;
float elapsedtime;
cudaEventCreate(&start);
cudaEventRecord(start,0);
BreadthFirstSearch<<<1,1>>>(adjacencyList, vertices, parent, level, inputsCircuits ,startArrayCount);
cudaDeviceSynchronize();
checkCudaError();
//stop Time measurement
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedtime,start,stop);
fprintf(stderr,"Time spent for kernel : %.10f seconds\n",elapsedtime/(float)1000);
// Printing Level and Parent Arrays
printf("\nLevel and Parent Arrays -\n");
for (i = 1; i <= vertices; ++i) {
printf("Level of Vertex %d is %d, Parent is %d\n",
i, level[i], parent[i]);
}
return 0;
}
|
b3c428bac37c7977fb83eb736a841d444127f5ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "CTF.cuh"
#include "CubicInterp.cuh"
#include "DeviceFunctions.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
#include "Transformation.cuh"
namespace gtom
{
//////////////////////////////////////////////////////
//Calculate power spectrum based on multiple regions//
//////////////////////////////////////////////////////
void d_CTFPeriodogram(tfloat* d_image, int2 dimsimage, float overlapfraction, int2 dimsregion, int2 dimspadded, tfloat* d_output2d, bool dopost)
{
// Create uniform grid over the image
int2 regions;
int3* h_origins = GetEqualGridSpacing(dimsimage, dimsregion, overlapfraction, regions);
int3* d_origins = (int3*)CudaMallocFromHostArray(h_origins, Elements2(regions) * sizeof(int3));
free(h_origins);
int norigins = Elements2(regions);
tfloat* d_temp2d;
hipMalloc((void**)&d_temp2d, ElementsFFT2(dimspadded) * norigins * sizeof(tfloat));
// Call the custom-grid version to extract 2D spectra
d_CTFPeriodogram(d_image, dimsimage, d_origins, norigins, dimsregion, dimspadded, d_temp2d, dopost);
d_ReduceMean(d_temp2d, d_output2d, ElementsFFT2(dimspadded), norigins);
hipFree(d_temp2d);
hipFree(d_origins);
}
void d_CTFPeriodogram(tfloat* d_image, int2 dimsimage, int3* d_origins, int norigins, int2 dimsregion, int2 dimspadded, tfloat* d_output2d, bool dopost, hipfftHandle planforw, tfloat* d_extracted, tcomplex* d_extractedft)
{
hipfftHandle ownplanforw = planforw;
if (planforw == NULL)
ownplanforw = d_FFTR2CGetPlan(2, toInt3(dimspadded), norigins);
int memlimit = 128 << 20;
int batchsize = norigins; // tmin(norigins, memlimit / (int)(Elements2(dimsregion) * 2 * sizeof(tfloat)));
tfloat* d_ownextracted;
if (d_extracted == NULL)
hipMalloc((void**)&d_ownextracted, norigins * Elements2(dimspadded) * sizeof(tfloat));
else
d_ownextracted = d_extracted;
tcomplex* d_ownextractedft;
if (d_extractedft == NULL)
hipMalloc((void**)&d_ownextractedft, norigins * ElementsFFT2(dimspadded) * sizeof(tcomplex));
else
d_ownextractedft = d_extractedft;
d_ExtractMany(d_image, d_ownextracted, toInt3(dimsimage), toInt3(dimsregion), d_origins, norigins);
//d_WriteMRC(d_ownextracted, toInt3(dimsregion.x, dimsregion.y, curbatch), "d_ownextracted.mrc");
d_NormMonolithic(d_ownextracted, d_ownextracted, Elements2(dimsregion), T_NORM_MEAN01STD, norigins);
tfloat radius = dimsregion.x * 3 / 4.0f / 2;
d_SphereMask(d_ownextracted, d_ownextracted, toInt3(dimsregion), &radius, dimsregion.x * 1 / 4.0f / 2, NULL, norigins);
//d_HammingMask(d_extracted, d_extracted, toInt3(dimsregion), &radius, NULL, norigins);
//d_HammingMaskBorderDistance(d_extracted, d_extracted, toInt3(dimsregion), dimsregion.x / 4, curbatch);
if (dimsregion.x != dimspadded.x || dimsregion.y != dimspadded.y)
{
d_Pad(d_ownextracted, (tfloat*)d_ownextractedft, toInt3(dimsregion), toInt3(dimspadded), T_PAD_VALUE, (tfloat)0, norigins);
//d_NormMonolithic((tfloat*)d_ownextractedft, d_ownextracted, Elements2(dimspadded), T_NORM_MEAN01STD, curbatch);
}
else
{
//d_NormMonolithic(d_ownextracted, d_ownextracted, Elements2(dimspadded), T_NORM_MEAN01STD, curbatch);
}
//d_WriteMRC(d_ownextracted, toInt3(dimspadded.x, dimspadded.y, norigins), "d_ownextracted.mrc");
d_FFTR2C(d_ownextracted, d_ownextractedft, &ownplanforw);
//d_WriteMRC(d_ownextracted, toInt3(dimspadded.x / 2 + 1, dimspadded.y, curbatch), "d_ownextractedft.mrc");
if (dopost)
{
d_Abs(d_ownextractedft, d_ownextracted, norigins * ElementsFFT2(dimspadded));
d_AddScalar(d_ownextracted, d_ownextracted, norigins * ElementsFFT2(dimspadded), (tfloat)1e-6);
d_Log(d_ownextracted, d_ownextracted, norigins * ElementsFFT2(dimspadded));
d_MultiplyByVector(d_ownextracted, d_ownextracted, d_ownextracted, ElementsFFT2(dimspadded) * norigins);
}
else
{
d_Abs(d_ownextractedft, d_output2d, norigins * ElementsFFT2(dimspadded));
}
//d_RemapHalfFFT2Half(d_ownextracted, d_output2d, toInt3(dimspadded), norigins);
//d_WriteMRC(d_output2d, toInt3(dimspadded.x / 2 + 1, dimspadded.y, norigins), "d_extractedoutput.mrc");
if (d_extractedft == NULL)
hipFree(d_extractedft);
if (d_extracted == NULL)
hipFree(d_ownextracted);
if (planforw == NULL)
hipfftDestroy(ownplanforw);
}
} | b3c428bac37c7977fb83eb736a841d444127f5ce.cu | #include "Prerequisites.cuh"
#include "Angles.cuh"
#include "CTF.cuh"
#include "CubicInterp.cuh"
#include "DeviceFunctions.cuh"
#include "FFT.cuh"
#include "Generics.cuh"
#include "Helper.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
#include "Transformation.cuh"
namespace gtom
{
//////////////////////////////////////////////////////
//Calculate power spectrum based on multiple regions//
//////////////////////////////////////////////////////
		// Computes a single averaged 2D power spectrum of d_image: lays a
		// uniform grid of overlapping regions over the image, extracts one
		// spectrum per region via the custom-grid overload, and averages them
		// into d_output2d (half-FFT layout of dimspadded).
		//
		// @overlapfraction - fractional overlap between neighboring regions
		// @dopost          - forwarded to the per-region overload
		void d_CTFPeriodogram(tfloat* d_image, int2 dimsimage, float overlapfraction, int2 dimsregion, int2 dimspadded, tfloat* d_output2d, bool dopost)
		{
			// Create uniform grid over the image
			int2 regions;
			int3* h_origins = GetEqualGridSpacing(dimsimage, dimsregion, overlapfraction, regions);
			int3* d_origins = (int3*)CudaMallocFromHostArray(h_origins, Elements2(regions) * sizeof(int3));
			free(h_origins);
			int norigins = Elements2(regions);
			tfloat* d_temp2d;
			// NOTE(review): cudaMalloc return value is unchecked
			cudaMalloc((void**)&d_temp2d, ElementsFFT2(dimspadded) * norigins * sizeof(tfloat));
			// Call the custom-grid version to extract 2D spectra
			d_CTFPeriodogram(d_image, dimsimage, d_origins, norigins, dimsregion, dimspadded, d_temp2d, dopost);
			// Average the per-region spectra into one output spectrum
			d_ReduceMean(d_temp2d, d_output2d, ElementsFFT2(dimspadded), norigins);
			cudaFree(d_temp2d);
			cudaFree(d_origins);
		}
void d_CTFPeriodogram(tfloat* d_image, int2 dimsimage, int3* d_origins, int norigins, int2 dimsregion, int2 dimspadded, tfloat* d_output2d, bool dopost, cufftHandle planforw, tfloat* d_extracted, tcomplex* d_extractedft)
{
cufftHandle ownplanforw = planforw;
if (planforw == NULL)
ownplanforw = d_FFTR2CGetPlan(2, toInt3(dimspadded), norigins);
int memlimit = 128 << 20;
int batchsize = norigins; // tmin(norigins, memlimit / (int)(Elements2(dimsregion) * 2 * sizeof(tfloat)));
tfloat* d_ownextracted;
if (d_extracted == NULL)
cudaMalloc((void**)&d_ownextracted, norigins * Elements2(dimspadded) * sizeof(tfloat));
else
d_ownextracted = d_extracted;
tcomplex* d_ownextractedft;
if (d_extractedft == NULL)
cudaMalloc((void**)&d_ownextractedft, norigins * ElementsFFT2(dimspadded) * sizeof(tcomplex));
else
d_ownextractedft = d_extractedft;
d_ExtractMany(d_image, d_ownextracted, toInt3(dimsimage), toInt3(dimsregion), d_origins, norigins);
//d_WriteMRC(d_ownextracted, toInt3(dimsregion.x, dimsregion.y, curbatch), "d_ownextracted.mrc");
d_NormMonolithic(d_ownextracted, d_ownextracted, Elements2(dimsregion), T_NORM_MEAN01STD, norigins);
tfloat radius = dimsregion.x * 3 / 4.0f / 2;
d_SphereMask(d_ownextracted, d_ownextracted, toInt3(dimsregion), &radius, dimsregion.x * 1 / 4.0f / 2, NULL, norigins);
//d_HammingMask(d_extracted, d_extracted, toInt3(dimsregion), &radius, NULL, norigins);
//d_HammingMaskBorderDistance(d_extracted, d_extracted, toInt3(dimsregion), dimsregion.x / 4, curbatch);
if (dimsregion.x != dimspadded.x || dimsregion.y != dimspadded.y)
{
d_Pad(d_ownextracted, (tfloat*)d_ownextractedft, toInt3(dimsregion), toInt3(dimspadded), T_PAD_VALUE, (tfloat)0, norigins);
//d_NormMonolithic((tfloat*)d_ownextractedft, d_ownextracted, Elements2(dimspadded), T_NORM_MEAN01STD, curbatch);
}
else
{
//d_NormMonolithic(d_ownextracted, d_ownextracted, Elements2(dimspadded), T_NORM_MEAN01STD, curbatch);
}
//d_WriteMRC(d_ownextracted, toInt3(dimspadded.x, dimspadded.y, norigins), "d_ownextracted.mrc");
d_FFTR2C(d_ownextracted, d_ownextractedft, &ownplanforw);
//d_WriteMRC(d_ownextracted, toInt3(dimspadded.x / 2 + 1, dimspadded.y, curbatch), "d_ownextractedft.mrc");
if (dopost)
{
d_Abs(d_ownextractedft, d_ownextracted, norigins * ElementsFFT2(dimspadded));
d_AddScalar(d_ownextracted, d_ownextracted, norigins * ElementsFFT2(dimspadded), (tfloat)1e-6);
d_Log(d_ownextracted, d_ownextracted, norigins * ElementsFFT2(dimspadded));
d_MultiplyByVector(d_ownextracted, d_ownextracted, d_ownextracted, ElementsFFT2(dimspadded) * norigins);
}
else
{
d_Abs(d_ownextractedft, d_output2d, norigins * ElementsFFT2(dimspadded));
}
//d_RemapHalfFFT2Half(d_ownextracted, d_output2d, toInt3(dimspadded), norigins);
//d_WriteMRC(d_output2d, toInt3(dimspadded.x / 2 + 1, dimspadded.y, norigins), "d_extractedoutput.mrc");
if (d_extractedft == NULL)
cudaFree(d_extractedft);
if (d_extracted == NULL)
cudaFree(d_ownextracted);
if (planforw == NULL)
cufftDestroy(ownplanforw);
}
} |
6dd300442107c500559585e63ff0903f6a636d0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "timer.h"
/*
* **CUDA KERNEL**
*
* Compute the sum of two matrices
* C[i] = A[i] + B[i]
*
*/
/*__global__ void matAdd(int N, float* a, float* b, float* c) {
}*/
// GPU kernel: element-wise sum of two N x N row-major matrices,
// matrixC = matrixA + matrixB.  One thread per element; expects a 2D
// launch (x indexes columns, y indexes rows) and guards the ragged edge
// so partial blocks are safe.
__global__ void matAdd(int N, float *matrixA, float *matrixB, float *matrixC)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= N || row >= N)
        return;                      // thread falls outside the matrix
    const int idx = row * N + col;   // row-major linear index
    matrixC[idx] = matrixA[idx] + matrixB[idx];
}
void compute_mat_add(int N, float *a, float* b, float *c);
/*
*
* Host code to drive the CUDA Kernel
*
*/
/*
 * Host driver: fills two N x N matrices with random values, sums them on
 * the GPU with a 16x16-thread 2D launch, then validates the result
 * against a CPU reference (compute_mat_add).
 *
 * Fixes vs. the original:
 *  - t_pcie_htd / t_pcie_dth / t_kernel / t_cpu were printed while still
 *    uninitialized (the stopwatch code is commented out) -- undefined
 *    behavior; they are now zero-initialized
 *  - the kernel-launch macro had a commented-out stopwatch call fused
 *    onto its first line
 *  - abs() was applied to a float difference (possible integer
 *    truncation); replaced with an explicit float absolute value
 *  - h_temp was never freed
 */
int main() {
	float *d_a, *d_b, *d_c;
	float *h_a, *h_b, *h_c, *h_temp;
	int i;
	int N = 256;
	/* Timing scaffold is disabled; keep the variables defined so the
	   diagnostics below print 0 instead of stack garbage. */
	long double t_pcie_htd = 0, t_pcie_dth = 0, t_kernel = 0, t_cpu = 0;
	/*
	  Create the matrices
	*/
	h_a = (float *) malloc(sizeof(float) * N * N);
	h_b = (float *) malloc(sizeof(float) * N * N);
	h_c = (float *) malloc(sizeof(float) * N * N);
	/*
	  Set the initial values of h_a, h_b, and h_c
	*/
	for (i=0; i < N * N; i++) {
		h_a[i] = (float) (rand() % 100) / 10.0;
		h_b[i] = (float) (rand() % 100) / 10.0;
		h_c[i] = 0.0;
	}
	/*
	  Allocate space on the GPU
	*/
	CUDA_CHECK_ERROR(hipMalloc(&d_a, sizeof(float) * N * N));
	CUDA_CHECK_ERROR(hipMalloc(&d_b, sizeof(float) * N * N));
	CUDA_CHECK_ERROR(hipMalloc(&d_c, sizeof(float) * N * N));
	/*
	  Copy the host matrices from CPU to GPU
	*/
	CUDA_CHECK_ERROR(hipMemcpy(d_a, h_a, sizeof(float) * N * N,
				   hipMemcpyHostToDevice));
	CUDA_CHECK_ERROR(hipMemcpy(d_b, h_b, sizeof(float) * N * N,
				   hipMemcpyHostToDevice));
	CUDA_CHECK_ERROR(hipMemcpy(d_c, h_c, sizeof(float) * N * N,
				   hipMemcpyHostToDevice));
	fprintf (stderr, "Time to transfer data from host to device: %Lg secs\n",
		 t_pcie_htd);
	/*
	  Launch an (N/16 x N/16) grid of 16x16-thread blocks
	*/
	dim3 GS(N/16, N/16, 1);
	dim3 BS(16, 16, 1);
	hipLaunchKernelGGL(matAdd, dim3(GS), dim3(BS), 0, 0, N, d_a, d_b, d_c);
	hipDeviceSynchronize ();
	fprintf (stderr, "Time to execute GPU kernel: %Lg secs\n",
		 t_kernel);
	/*
	  Copy d_c from GPU to CPU
	*/
	CUDA_CHECK_ERROR(hipMemcpy(h_c, d_c, sizeof(float) * N * N,
				   hipMemcpyDeviceToHost));
	fprintf (stderr, "Time to transfer data from device to host: %Lg secs\n",
		 t_pcie_dth);
	/*
	  Compute the CPU reference and count mismatches
	*/
	h_temp = (float *) malloc(sizeof(float) * N * N);
	compute_mat_add (N, h_a, h_b, h_temp);
	fprintf (stderr, "Time to execute CPU program: %Lg secs\n",
		 t_cpu);
	int cnt = 0;
	for(int i = 0; i < N * N; i++) {
		float diff = h_temp[i] - h_c[i];
		if (diff < 0) diff = -diff;          /* float abs, no int truncation */
		if (diff > 1e-5f) cnt++;
	}
	fprintf(stderr, "number of errors: %d out of %d\n", cnt, N * N);
	/*
	  Free the device memory
	*/
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_c);
	/*
	  Free the host memory (h_temp was leaked in the original)
	*/
	free(h_a);
	free(h_b);
	free(h_c);
	free(h_temp);
	if(cnt == 0) {
		printf("\n\nSuccess\n");
	}
	return 0;
}
// CPU reference implementation: c = a + b for N x N row-major matrices.
// Used by main() to validate the GPU result.
void
compute_mat_add(int N, float *a, float* b, float *c) {
	const int total = N * N;
	for (int k = 0; k < total; k++) {
		c[k] = a[k] + b[k];
	}
}
| 6dd300442107c500559585e63ff0903f6a636d0f.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "timer.h"
/*
* **CUDA KERNEL**
*
* Compute the sum of two matrices
* C[i] = A[i] + B[i]
*
*/
/*__global__ void matAdd(int N, float* a, float* b, float* c) {
}*/
// GPU kernel: element-wise sum of two N x N row-major matrices,
// matrixC = matrixA + matrixB.  One thread per element; expects a 2D
// launch (x indexes columns, y indexes rows) and guards the ragged edge
// so partial blocks are safe.
__global__ void matAdd(int N, float *matrixA, float *matrixB, float *matrixC)
{
	int threadCol = blockIdx.x * blockDim.x + threadIdx.x;
	int threadRow = blockIdx.y * blockDim.y + threadIdx.y;
	int indexOfMatrix = threadCol + threadRow * N;	// row-major linear index
	if(threadCol < N && threadRow < N)
		matrixC[indexOfMatrix] = matrixA[indexOfMatrix] + matrixB[indexOfMatrix];
}
void compute_mat_add(int N, float *a, float* b, float *c);
/*
*
* Host code to drive the CUDA Kernel
*
*/
int main() {
float *d_a, *d_b, *d_c;
float *h_a, *h_b, *h_c, *h_temp;
int i;
int N = 256;
//struct stopwatch_t* timer = NULL;
long double t_pcie_htd, t_pcie_dth, t_kernel, t_cpu;
/* Setup timers */
//stopwatch_init ();
//timer = stopwatch_create ();
/*
Create the matrices
*/
h_a = (float *) malloc(sizeof(float) * N * N);
h_b = (float *) malloc(sizeof(float) * N * N);
h_c = (float *) malloc(sizeof(float) * N * N);
/*
Set the initial values of h_a, h_b, and h_c
*/
for (i=0; i < N * N; i++) {
h_a[i] = (float) (rand() % 100) / 10.0;
h_b[i] = (float) (rand() % 100) / 10.0;
h_c[i] = 0.0;
}
/*
Allocate space on the GPU
*/
CUDA_CHECK_ERROR(cudaMalloc(&d_a, sizeof(float) * N * N));
CUDA_CHECK_ERROR(cudaMalloc(&d_b, sizeof(float) * N * N));
CUDA_CHECK_ERROR(cudaMalloc(&d_c, sizeof(float) * N * N));
/*
Copy d_a and d_b from CPU to GPU
*/
//stopwatch_start (timer);
CUDA_CHECK_ERROR(cudaMemcpy(d_a, h_a, sizeof(float) * N * N,
cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(d_b, h_b, sizeof(float) * N * N,
cudaMemcpyHostToDevice));
CUDA_CHECK_ERROR(cudaMemcpy(d_c, h_c, sizeof(float) * N * N,
cudaMemcpyHostToDevice));
//t_pcie_htd = stopwatch_stop (timer);
//t_pcie_htd = stopwatch_stop (timer);
fprintf (stderr, "Time to transfer data from host to device: %Lg secs\n",
t_pcie_htd);
/*
Run N/256 blocks of 256 threads each
*/
dim3 GS(N/16, N/16, 1);
dim3 BS(16, 16, 1);
//dim3 GS(1);
//dim3 BS(N,N);
//stopwatch_start (timer);
matAdd<<<GS, BS>>>(N, d_a, d_b, d_c);
cudaThreadSynchronize ();
//t_kernel = stopwatch_stop (timer);
fprintf (stderr, "Time to execute GPU kernel: %Lg secs\n",
t_kernel);
/*
Copy d_cfrom GPU to CPU
*/
//stopwatch_start (timer);
CUDA_CHECK_ERROR(cudaMemcpy(h_c, d_c, sizeof(float) * N * N,
cudaMemcpyDeviceToHost));
//t_pcie_dth = stopwatch_stop (timer);
fprintf (stderr, "Time to transfer data from device to host: %Lg secs\n",
t_pcie_dth);
/*
Double check errors
*/
h_temp = (float *) malloc(sizeof(float) * N * N);
//stopwatch_start (timer);
compute_mat_add (N, h_a, h_b, h_temp);
//t_cpu = stopwatch_stop (timer);
fprintf (stderr, "Time to execute CPU program: %Lg secs\n",
t_cpu);
int cnt = 0;
for(int i = 0; i < N * N; i++) {
if(abs(h_temp[i] - h_c[i]) > 1e-5) cnt++;
}
fprintf(stderr, "number of errors: %d out of %d\n", cnt, N * N);
/*
Free the device memory
*/
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
/*
Free the host memory
*/
free(h_a);
free(h_b);
free(h_c);
/*
Free timer
*/
//stopwatch_destroy (timer);
if(cnt == 0) {
printf("\n\nSuccess\n");
}
}
// CPU reference implementation: c = a + b for N x N row-major matrices.
// Used by main() to validate the GPU result.
void
compute_mat_add(int N, float *a, float* b, float *c) {
	for(int i = 0; i < N; i++) {
		for(int j = 0; j < N; j++) {
			c[i * N + j] = a[i * N + j] + b[i * N + j];
		}
	}
}
|
b8bceaf23aea3ccb9012fa13c4ee7846a1692063.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#if defined(__HIPCC__)
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <c10/hip/HIPMathCompat.h>
#elif defined(__HIPCC__)
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <c10/hip/HIPMathCompat.h>
#endif
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// Elementwise copysign(a, b): magnitude of a combined with the sign of b.
// Dispatches over the floating-point types plus BFloat16/Half and runs a
// scalar-aware GPU loop over the iterator's operands.
void copysign_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "copysign_cuda", [&]() {
    gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      // compat wrapper picks the correct copysign overload for the backend
      return c10::hip::compat::copysign(a, b);
    });
  });
}
REGISTER_DISPATCH(copysign_stub, ©sign_kernel_cuda);
}} // namespace at::native
| b8bceaf23aea3ccb9012fa13c4ee7846a1692063.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#if defined(__CUDACC__)
#include <cuda.h>
#include <cuda_fp16.h>
#include <c10/cuda/CUDAMathCompat.h>
#elif defined(__HIPCC__)
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <c10/hip/HIPMathCompat.h>
#endif
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// Elementwise copysign(a, b): magnitude of a combined with the sign of b.
// Dispatches over the floating-point types plus BFloat16/Half and runs a
// scalar-aware GPU loop over the iterator's operands.
void copysign_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), "copysign_cuda", [&]() {
    gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      // compat wrapper picks the correct copysign overload for the backend
      return c10::cuda::compat::copysign(a, b);
    });
  });
}
REGISTER_DISPATCH(copysign_stub, ©sign_kernel_cuda);
}} // namespace at::native
|
21997ab2a37e8658f6729ca2c19451bced16dc02.hip | // !!! This is a file automatically generated by hipify!!!
// C++
#include <iostream>
#include <string>
// C
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
// CUDA
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime_api.h>
// Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC)
#pragma warning(push,4)
#define DEBUG 0
#define MICRO 0
#define MACRO 0
#define QUEUE 0
// CUDA is dumb
// Software atomicMin for 32-bit floats (CUDA has no hardware float
// atomicMin).  Spins on atomicCAS over the float's bit pattern until this
// thread's minimum "sticks" against concurrent updates.
//
// @address - location to take the minimum into
// @val     - candidate minimum
// @return  - value stored at *address immediately before this call
// NOTE(review): fminf(val, NaN) returns val, so a NaN already stored at
// *address gets replaced rather than propagated -- confirm that is intended.
__device__ static float atomicMin(float* address, float val)
{
	int* address_as_i = (int*) address;
	int old = *address_as_i, assumed;
	do {
		assumed = old;
		old = ::atomicCAS(address_as_i, assumed,
			__float_as_int(::fminf(val, __int_as_float(assumed))));
	} while (assumed != old);
	return __int_as_float(old);
}
// Software atomicAdd for 32-bit floats built on atomicCAS, mirroring the
// atomicMin helper above.
//
// Fix: the previous version reinterpreted the float* as an
// unsigned long long int* and CAS'd 8 bytes (the double-precision recipe),
// reading and writing 4 bytes past the float object and converting the
// value through double bit patterns -- undefined behavior and wrong
// results.  It now uses the 32-bit int CAS pattern.
//
// @address - location to accumulate into
// @val     - addend
// @return  - value stored at *address immediately before this add
// NOTE(review): sm_20+ provides a hardware atomicAdd(float*, float);
// this definition exists only for parity with the atomicMin helper.
__device__ float atomicAdd(float* address, float val)
{
	int* address_as_i = (int*) address;
	int old = *address_as_i, assumed;
	do {
		assumed = old;
		old = ::atomicCAS(address_as_i, assumed,
			__float_as_int(val + __int_as_float(assumed)));
	} while (assumed != old);
	return __int_as_float(old);
}
#if QUEUE
const int MaxBlocks = 10000;
const int SliceSize = 100;
#endif
/******************************************************************************/
/*** 2-opt with random restarts ***********************************************/
/******************************************************************************/
// Euclidean distance
#define dist(a, b) (sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x) + (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y)))
#define swap(a, b) {float tmp = a; a = b; b = tmp;}
static __device__ int climbs_d = 0;
static __device__ float best_d = INT_MAX;
#if QUEUE
static __device__ int restart_d = 0;
#endif
// Buffer space, used for cache and maximum propagation
#if DEBUG
#if MICRO
static __device__ unsigned long long int d_lDuration = 0;
static __device__ unsigned long long int d_cDuration = 0;
static __device__ unsigned long long int d_pDuration = 0;
static __device__ long long int load_duration[128] = {0};
static __device__ long long int compute_duration[128] = {0};
static __device__ long long int propagate_duration[128] = {0};
#endif
#if MACRO
static __device__ unsigned long long int d_uDuration = 0;
static __device__ unsigned long long int d_sDuration = 0;
static __device__ long long int update_duration[128] = {0};
static __device__ long long int single_duration[128] = {0};
#endif
#endif
// Instrumentation
#define LOG( X ) { if( DEBUG ) {X();} }
// Load
static __device__ void inline load_start() {
#if MICRO
if(threadIdx.x == 0) {load_duration[blockIdx.x] -= clock64();}
#endif
}
static __device__ void inline load_end() {
#if MICRO
if(threadIdx.x == 0) {load_duration[blockIdx.x] += clock64();}
#endif
}
// Compute
static __device__ void inline compute_start() {
#if MICRO
if(threadIdx.x == 0) {compute_duration[blockIdx.x] -= clock64();}
#endif
}
static __device__ void inline compute_end() {
#if MICRO
if(threadIdx.x == 0) {compute_duration[blockIdx.x] += clock64();}
#endif
}
// Compute
static __device__ void inline propagate_start() {
#if MICRO
if(threadIdx.x == 0) {propagate_duration[blockIdx.x] -= clock64();}
#endif
}
// Stops the per-block "propagate" phase timer (thread 0 only).
// Fix: the guard read "#if sMICRO" (typo), so the stop was never compiled
// in and propagate_duration stayed permanently negative once started.
static __device__ void inline propagate_end() {
#if MICRO
	if(threadIdx.x == 0) {propagate_duration[blockIdx.x] += clock64();}
#endif
}
// Single_iter
static __device__ void inline single_start() {
#if MACRO
if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] -= clock64();}
#endif
}
static __device__ void inline single_end() {
#if MACRO
if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] += clock64();}
#endif
}
// Update
static __device__ void inline update_start() {
#if MACRO
if(threadIdx.x == 0) {update_duration[blockIdx.x] -= clock64();}
#endif
}
static __device__ void inline update_end() {
#if MACRO
if(threadIdx.x == 0) {update_duration[blockIdx.x] += clock64();}
#endif
}
enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS};
// Data structure used to hold position along path
struct __align__(8) Data {
float x,y;
};
#if QUEUE
//
// Grabs the next slice of restart indices for this block: thread 0
// atomically advances the global restart counter by SliceSize and
// broadcasts the previous value to the whole block through shared memory.
//
// Synchronizes (__syncthreads), so it must not be called from divergent
// branches.
//
// NOTE(review): the slice start is broadcast through a float slot, which
// represents integers exactly only up to 2^24; and there is no barrier
// after the read, so an immediately following write to w_buffer[0] by
// another code path could race -- confirm call sites.
//
// @w_buffer - shared scratch buffer (slot 0 is used for the broadcast)
// @return   - first restart index of this block's slice
static __device__ inline int
nextSlice(float* __restrict__ w_buffer) {
	if(threadIdx.x==0) {
		w_buffer[0] = atomicAdd(&restart_d, SliceSize);
	}__syncthreads();
	return w_buffer[0];
}
#endif
// Allocates the per-block working set on the device heap and copies the
// city positions into it.
//
// Thread 0 performs the allocations (device-side new) and publishes the
// pointers to the rest of the block through __shared__ variables; all
// threads then cooperatively copy the positions.
//
// @pos_d   - global input array of city positions (read-only)
// @pos     - out: block-local copy of the positions (allocated with
//            cities + 1 slots; presumably the extra slot closes the
//            tour -- TODO confirm)
// @weight  - out: per-city edge-weight scratch array (uninitialized)
// @cities  - the amount of points in our graph
//
// @return - true if both allocations succeeded, false otherwise.
// NOTE(review): device-side new draws from the device malloc heap, whose
// default size is small; large inputs need
// cudaDeviceSetLimit(cudaLimitMallocHeapSize, ...) on the host.
template <int TileSize>
static inline __device__ bool
initMemory(const Data* &pos_d, Data* &pos, float* &weight, const int &cities) {
	// Shared memory is required to share the allocated memory
	__shared__ Data *d;
	__shared__ float *w;
	if(threadIdx.x == 0 ) {
		d = new Data[cities + 1];
		if( d != NULL ) {
			w = new float[cities];
			if( w == NULL ) {
				// roll back the first allocation so failure is atomic
				delete[] d;
				d = NULL;
			}
		}
	}__syncthreads();
	if(d == NULL) {
		return false;
	}
	// Save new memory locations
	pos = d;
	weight = w;
	// Cooperative copy of the input positions into the block-local buffer
	for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i];
	__syncthreads();
	return true;
}
//
// Block-wide reduction: each thread submits a float and the smallest value
// seen by any thread is left in w_buffer[0] for everyone to read.
//
// NOTE(review): despite the name "maximum", every comparison keeps the
// *smaller* value (tour improvements are negative), and the declared
// return type is int, so the returned float is truncated; callers that
// need the exact value read w_buffer[0] directly.
//
// Contains __syncthreads(); must be called by all threads of the block.
//
// @t_val - The number that the thread submits as a candidate
// @cities - The number of cities (bounds the live region of the buffer)
// @w_buffer - Shared scratch buffer of at least min(TileSize,blockDim.x) floats
//
// @return - The reduced value seen from all threads (truncated to int)
template <ThreadBufferStatus Status, int TileSize>
static inline __device__ int
maximum(float t_val, const int &cities, float* __restrict__ &w_buffer) {
LOG( propagate_start );
int upper = min(blockDim.x,min(TileSize,cities));
// We have to condense things down
if(Status == MORE_THREADS_THAN_BUFFER) {
// Compute your index and then try to shove what you have in the buffer
const int Index = threadIdx.x % TileSize;
w_buffer[Index] = t_val;
__syncthreads();
// Now try to win (someone will win)
for(int i = 0 ; i <= (blockDim.x /TileSize); ++i ) {
if(t_val < w_buffer[Index]) {
w_buffer[Index] = t_val;
}
}
}else { // Otherwise we have more than enough room!
w_buffer[threadIdx.x] = t_val;
}__syncthreads();
#pragma unroll 4
for( int i = 512; i > 32 ; i /= 2 ) {
if (TileSize > i && blockDim.x > i) {
int offset = (upper + 1) / 2;
if( threadIdx.x < offset) {
float tmp = w_buffer[threadIdx.x + offset];
if(tmp < t_val) {
w_buffer[threadIdx.x] = t_val = tmp;
}
}__syncthreads();
upper = offset;
}
}
// 64 and down: a single warp finishes the reduction
if(threadIdx.x < 32) {
// Yes. upper = 32. w_buffer[tid] = t_val = min(t_val,w_buffer[threadIdx.x + 16]
if(TileSize > 32 && blockDim.x > 32) {
float tmp = w_buffer[threadIdx.x + (upper+1)/2];
if(tmp < t_val) {
w_buffer[threadIdx.x] = t_val = tmp;
}
}
for( int i = 16; i > 0; i = i / 2 ) {
if(threadIdx.x < i) {
float tmp = w_buffer[threadIdx.x + i];
if(tmp < t_val) {
w_buffer[threadIdx.x] = t_val = tmp;
}
}
}
}__syncthreads();
LOG( propagate_end );
return w_buffer[0];
}
//
// After we find the best positions to reconnect we need to reverse the
// path between them.
//
// Threads enter with strided start/end pairs (see update()); each thread
// swaps its mirrored pair of positions and the matching pair of interior
// edge weights.  weight[k] is the edge between pos[k] and pos[k+1], which
// is why the weight partner index is end-1, not end.
//
// @start - The first position in the sub-path we have to swap with the end
// @end - The last position in the path we have to swap with the start
// @pos - The positions in our path
// @weights - The edge weights between points
//
// TODO: Is it better to reverse the weight or just recompute it?
//
static inline __device__ void
reverse(int start, int end, Data* &pos, float* &weight) {
while(start<end) {
float w = weight[start];
Data d = pos[start];
weight[start] = weight[end-1];
pos[start] = pos[end];
weight[end-1] = w;
pos[end] = d;
start += blockDim.x;
end -= blockDim.x;
}__syncthreads();
}
//
// Perform a single iteration of Two-Opt.
//
// Each thread records its best candidate reversal in the three in/out
// reference parameters; update() later reduces those across the block.
// Contains __syncthreads(); call from all threads of the block.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path (negated distances)
// @minchange - In/out: the best change this thread has found
// @mini - Out: the ith city in the path that is part of the swap
// @minj - Out: the jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
// @x_buffer/@y_buffer/@w_buffer - Shared tiles caching TileSize positions/weights
template <int TileSize>
static __device__ void
singleIter(Data* &pos, float* &weight, float &minchange, int &mini, int &minj, const int &cities, float* __restrict__ x_buffer, float* __restrict__ y_buffer, float* __restrict__ w_buffer) {
LOG( single_start );
//
// The tour is divided into segments. Each segment has a length of
// the number of threads, except possibly the last one.
//
// We traverse through the segments. When we are in a segment each
// city in the segment of the tour is given to a thread. Then we
// begin scanning each city from the end of the tour until we reach
// the current city. Later threads will terminate this process earlier
// than earlier threads.
//
// During each scan we will evaluate if it is better to reverse the path
// between the two cities. If so we check to see if that is better than
// any other possible reversal we have seen.
//
// After we have done this for all segments then we call update. Update
// make some modification to the tour given the set of best reversals
// seen by each thread.
//
//
for (int leading = 0; leading < cities - 2; leading += blockDim.x) {
int i = leading + threadIdx.x;
float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1;
if (i < cities - 2) {
minchange -= weight[i];
pxi0 = pos[i].x;
pyi0 = pos[i].y;
pxi1 = pos[i+1].x;
pyi1 = pos[i+1].y;
pxj1 = pos[0].x;
pyj1 = pos[0].y;
}
for (int trailing = cities - 1; trailing >= leading + 2; trailing -= TileSize) {
int bound = trailing - TileSize + 1; // The lower bound on what we can load
//
// Load the shared memory cache
//
// Each thread will try to load adjacent elements
//
LOG( load_start );
for(int k = threadIdx.x; k < TileSize; k += blockDim.x) {
int cache_idx = k + bound;
if (cache_idx >= (leading + 2)) { // Never go below the lowest city
x_buffer[k] = pos[cache_idx].x;
y_buffer[k] = pos[cache_idx].y;
w_buffer[k] = weight[cache_idx];
}
}__syncthreads();
LOG( load_end );
LOG( compute_start );
// Compute the lower bound that we can see
int lower = bound;
if (lower < i + 2) lower = i + 2;
// Go over loaded cache that everyone will use
for (int current = trailing; current >= lower; current--) {
int cache_idx = current - bound;
float pxj0 = x_buffer[cache_idx];
float pyj0 = y_buffer[cache_idx];
float change = w_buffer[cache_idx]
+ (sqrtf((pxi0 - pxj0) * (pxi0 - pxj0) + (pyi0 - pyj0) * (pyi0 - pyj0)))
+ (sqrtf((pxi1 - pxj1) * (pxi1 - pxj1) + (pyi1 - pyj1) * (pyi1 - pyj1)));
// Shift down
pxj1 = pxj0;
pyj1 = pyj0;
// If better save it and where we found it
if (minchange > change) {
minchange = change;
mini = i;
minj = current;
}
}__syncthreads();
LOG( compute_end );
}
if (i < cities - 2) {
minchange += weight[i];
}
}
LOG( single_end );
}
//
// Perform the swaps to the edges i and j to decrease the total length of our
// path and update the weight and pos arrays appropriately.
//
// Repeatedly: reduce the per-thread best changes (via maximum()), let the
// winning thread publish its (mini,minj) pair through w_buffer[1..3],
// apply the reversal, invalidate candidates that overlap it, and repeat
// until no improving change remains.
//
// Contains __syncthreads(); call from all threads of the block.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @minchange - The current best change we can make
// @mini - The ith city in the path that is part of the swap
// @minj - The jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
//
// @return - true if at least one reversal was applied, false otherwise
template <ThreadBufferStatus Status, int TileSize>
static __device__ bool
update(Data* &pos, float* &weight, float &minchange, int &mini, int &minj, const int &cities, float* __restrict__ w_buffer) {
LOG( update_start );
// For all threads, find the best change
maximum<Status,TileSize>(minchange, cities, w_buffer);
// If we don't have one, oh well.
if(w_buffer[0] >= 0) {
LOG( update_end );
return false;
}
// While we have an update
while(w_buffer[0] < 0 ) {
// If we have multiple bests, pick one (last writer wins)
if (minchange == w_buffer[0]) {
w_buffer[1] = threadIdx.x;
}__syncthreads();
// The winning thread publishes which indices to swap
if(threadIdx.x==w_buffer[1]) {
w_buffer[2] = mini;
w_buffer[3] = minj;
}__syncthreads();
// Give them to each thread
int mi = (int)w_buffer[2];
int mj = (int)w_buffer[3];
// If our candidate overlaps the applied reversal it is no longer valid
if(!(minj < (mi - 1)) && !(mini > (mj + 1))) {
minchange = 0;
}
// Reverse the path between the nodes selected
reverse(mi+1+threadIdx.x,mj-threadIdx.x,pos,weight);
// Fix connecting edges weights for the endpoints
weight[mi] = -dist(mi,mi+1);
weight[mj] = -dist(mj,mj+1);
__syncthreads(); // Wait for everyone
// Get the next best
maximum<Status,TileSize>(minchange, cities, w_buffer);
}
LOG( update_end );
return true;
}
//
// Given a path we randomly permute it into a new path and then initialize the weights of the path.
//
// Thread 0 swaps every interior position with another random interior
// position (pos[0] stays fixed) and closes the tour by copying pos[0] into
// pos[cities]. All threads then recompute the (negated) edge weights.
//
// NOTE(review): j is drawn from [1, cities-1] independently of i, so this
// is a random-transposition shuffle, not an unbiased Fisher-Yates.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @cities - The number of cities along the path (excluding the end point)
static __device__ inline void
permute(Data* &pos, float* &weight, const int &cities, hiprandState_t &rndstate) {
if (threadIdx.x == 0) { // serial permutation
for (int i = 1; i < cities; i++) {
int j = hiprand(&rndstate) % (cities - 1) + 1;
Data d = pos[i];
pos[i] = pos[j];
pos[j] = d;
}
pos[cities] = pos[0];
}__syncthreads();
for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(i, i + 1);
__syncthreads();
}
//
// Releases this block's resources and folds its statistics into the
// global results.
//
// Only thread 0 does the work; the arrays were allocated by thread 0 in
// initMemory(), so only thread 0 may release them.
//
// @pos - Pointer to allocated path memory (from new[])
// @weight - Pointer to allocated edge weight memory (from new[])
// @local_climbs - The number of climbs performed by this block
// @best_length - The best length this block found
static __device__ inline void
cleanup(Data* &pos, float* &weight, int &local_climbs, float &best_length) {
if (threadIdx.x == 0) {
// Save data
atomicAdd(&climbs_d,local_climbs);
atomicMin(&best_d, best_length);
// Release memory.  BUGFIX: both arrays were created with new[] in
// initMemory(), so array delete is required (plain delete is UB).
delete[] pos;
delete[] weight;
#if DEBUG
#if MICRO
atomicAdd(&d_lDuration,load_duration[blockIdx.x]);
atomicAdd(&d_cDuration,compute_duration[blockIdx.x]);
atomicAdd(&d_pDuration,propagate_duration[blockIdx.x]);
#endif
#if MACRO
atomicAdd(&d_uDuration,update_duration[blockIdx.x]);
atomicAdd(&d_sDuration,single_duration[blockIdx.x]);
#endif
#endif
}
}
//
// Perform iterative two-opt until there can be no more swaps to reduce the path length.
//
// Launch: one block per concurrent climber; blockDim.x threads cooperate
// on one tour. Uses 3*TileSize floats of static shared memory.
//
// @Restarts - Total number of random restarts to perform
// @pos_d - The position of each point in the graph.
// @cities - The number of vertices in the graph
template <ThreadBufferStatus Status, int TileSize>
static __global__ __launch_bounds__(1024, 2) void
TwoOpt(const int Restarts, const Data *pos_d, const int cities) {
Data* pos;
float* weight;
int local_climbs = 0;
float best_length = INT_MAX;
hiprandState_t rndstate;
//hiprand_init(blockIdx.x , 0, 0, &rndstate);
__shared__ float x_buffer[TileSize];
__shared__ float y_buffer[TileSize];
// BUGFIX: was "__shared__ int w_buffer[TileSize]" -- every consumer
// (singleIter/update/maximum/nextSlice and the float atomicAdd below)
// takes a float*, so the int declaration did not even type-check.
__shared__ float w_buffer[TileSize];
// Initialize the memory, if cannot then output error and exit
if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) {
if(threadIdx.x == 0) {
printf("Memory initialization error for block %d\n", blockIdx.x);
}
return;
}
#if QUEUE
for(int slice = nextSlice(w_buffer) ; slice < Restarts; slice = nextSlice(w_buffer)) { // get smaller blocks
for( int r = slice ; r < slice + SliceSize && r < Restarts ; ++r ) {
#else
for(int r = blockIdx.x; r < Restarts; r += gridDim.x) { // even blocks
#endif
// Re-seed periodically so long runs stay decorrelated across blocks
if( local_climbs % 10 == 0 ) {
hiprand_init( blockIdx.x + gridDim.x * local_climbs , 0, 0, &rndstate);
}
int mini,minj;
// BUGFIX: minchange was declared int but is passed by reference as
// float to singleIter() and update().
float minchange;
// Given our current path we need to permute it
permute(pos,weight,cities,rndstate);
// Keep applying two-opt until we reach some local
// (or global) minimum on the length
do {
++local_climbs;
mini = minj = 0;
minchange = 0;
singleIter<TileSize>(pos, weight, minchange, mini, minj, cities, x_buffer, y_buffer, w_buffer);
} while (update<Status,TileSize>(pos, weight, minchange, mini, minj, cities, w_buffer));
// Calculate the length of the path
w_buffer[0] = 0;
__syncthreads();
float term = 0;
for (int i = threadIdx.x; i < cities; i += blockDim.x) {
term += dist(i, i + 1);
}
atomicAdd(w_buffer,term);
__syncthreads();
// If better then save it to my local best
if(threadIdx.x == 0) {
if(w_buffer[0] < best_length) {
best_length = w_buffer[0];
}
}
#if QUEUE
}
#endif
}
// Release all my resources, and save the best seen
// with any other statistics
cleanup(pos, weight, local_climbs, best_length);
}
//
// Checks whether an asynchronous runtime error occurred; if so prints the
// message passed plus the runtime's error string, then exits.
//
// Synchronizes the device, so only use at coarse checkpoints.
//
// @msg - Message to print out if error occurs
static void
CudaTest(const char *msg) { // BUGFIX: string literals were passed to a non-const char*
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
system("PAUSE"); // NOTE(review): Windows-ism; prints a shell error elsewhere
exit(-1);
}
}
#define mallocOnGPU(addr, size) if (hipSuccess != hipMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory");
#define copyToGPU(to, from, size) if (hipSuccess != hipMemcpy(to, from, size, hipMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed");
//
// Read TSPLIB files into GPU memory. ATT and CEIL_2D edge weight types are not supported
//
// @fname - The name of the file to read the TSP data from
// @pos_d - Pointer to the pointer that will hold data on GPU
// and is modified here to be the address on the GPU
//
// @return - Returns the number of cities found
static int
readInput(const char *fname, Data **pos_d) {
int ch, cnt, in1, cities;
float in2, in3;
FILE *f;
Data *pos;
char str[256];
f = fopen(fname, "rt");
if (f == NULL) {fprintf(stderr, "could not open file %s\n", fname); exit(-1);}
// Skip the first three header lines, then scan to the ':' of DIMENSION
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
// BUGFIX: bounded reads (%255s) prevent the buffer overrun the old
// unbounded %s allowed; fscanf returns are now checked.
if (fscanf(f, "%255s\n", str) != 1) {fprintf(stderr, "wrong file format\n"); exit(-1);}
cities = atoi(str);
if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
if (fscanf(f, "%255s\n", str) != 1) {fprintf(stderr, "wrong file format\n"); exit(-1);}
if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
cnt = 0;
// Read "index x y" rows until a line fails to match (the EOF marker)
while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3) == 3) {
// BUGFIX: bounds check before the write; the old order wrote one
// element past the array before detecting over-long input.
if (cnt >= cities) {fprintf(stderr, "input too long\n"); exit(-1);}
pos[cnt].x = in2;
pos[cnt].y = in3;
++cnt;
if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
if (fscanf(f, "%255s", str) != 1 || strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
mallocOnGPU(*pos_d, sizeof(Data) * cities);
copyToGPU(*pos_d, pos, sizeof(Data) * cities);
fclose(f);
// BUGFIX: pos was allocated with new[]; plain delete was UB.
delete[] pos;
return cities;
}
//
// Translate a ThreadBufferStatus enum value into its source-code spelling.
//
// @status - The enum value to translate
//
// @return - The enum's string representation, or a fallback message for
//           unknown values
static const std::string
getName(const ThreadBufferStatus status) {
	if (status == MORE_THREADS_THAN_BUFFER) return "MORE_THREADS_THAN_BUFFER";
	if (status == EQUAL_SIZE) return "EQUAL_SIZE";
	if (status == MORE_BUFFER_THAN_THREADS) return "MORE_BUFFER_THAN_THREADS";
	return "enum value not found.";
}
// Returns the per-SM shared-memory capacity in bytes for a given
// compute-capability major version (pre-Kepler / Kepler / Maxwell+).
int getMaxSharedMemory( int major ) {
	if (major >= 5) return 65536;  // Maxwell and newer
	if (major >= 3) return 32768;  // Kepler
	return 16384;                  // Fermi and older
}
//
// Calculates the maximum number of resident blocks that the card can hold
//
// @Shared_Bytes - The amount of bytes each block will allocate
// @Threads - Number of threads that each block will have
//
// @return - Returns the number of blocks the card can have resident
static int
getMaxBlocks(const int Shared_Bytes, const int Threads) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props,0);
std::cout << "Compute Version " << props.major << "." << props.minor << std::endl;
/* Defaults assume compute capability 5.x or higher */
int numBlocks = 0;
int Max_Shared = 65536;
int Max_Blocks = 32;
// NOTE(review): assumes 2048 resident threads per SM for every generation
const int Block_Thread_Limit = 2048 / Threads;
if(props.major < 3) { // Fermi
Max_Shared = 16384;
Max_Blocks = 8;
}else if(props.major < 5) { // Kepler
Max_Shared = 49152;
Max_Blocks = 16;
}
const int Block_Shared_Limit = (Max_Shared / Shared_Bytes);
numBlocks = props.multiProcessorCount * min(Max_Blocks,min(Block_Shared_Limit,Block_Thread_Limit));
#if QUEUE
// Queue mode oversubscribes so the work queue keeps every SM busy
numBlocks = max(MaxBlocks, numBlocks );
#endif
return numBlocks;
}
//
// Round an integer up to the next multiple of 32 (the warp size).
//
// @in - The integer to round
//
// @return - The smallest multiple of 32 that is greater than or equal to in
static int
next32(int in) {
	const int chunks = (in + 31) / 32;
	return 32 * chunks;
}
//
// Handle ThreadBufferStatus kernel selection.
//
// Chooses the launch configuration (block count and status variant),
// sizes the device heap for per-block allocations, times the selected
// kernel with events, and returns the elapsed time.
//
// @Restarts - Total number of random restarts
// @Threads - Threads per block
// @Pos_d - Device pointer to the city coordinates
// @Cities - Number of cities
//
// @return - Kernel execution time in milliseconds
template <int TileSize>
static float
_wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) {
// Static shared memory per block: the x/y/w tiles
const int Shared_Bytes = (sizeof(int) + 2*sizeof(float)) * TileSize;
const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes + 16,Threads));
const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? MORE_BUFFER_THAN_THREADS : EQUAL_SIZE;
float time;
// Device-heap budget for initMemory()'s per-block new[] allocations
const int Device_Memory = (sizeof(int) + sizeof(Data)) * (Cities + 1)* 2*Blocks;
hipDeviceSetLimit(hipLimitMallocHeapSize, Device_Memory);
CudaTest("Change heap size");
// Output runtime configuration
std::cout << "Blocks = " << Blocks
<< ", Threads = " << Threads
<< ", TileSize = " << TileSize
<< ", Status = " << getName(Status)
<< ", Shared Bytes = " << Shared_Bytes
<< ", Device Memory = " << Device_Memory/(1024.0f*1024.0f) << "MB" << std::endl;
#if QUEUE
std::cout << "SliceSize = " << SliceSize << std::endl;
#endif
hipEvent_t begin,end;
hipEventCreate(&begin);
hipEventCreate(&end);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
hipProfilerStart();
switch(Status) {
case MORE_THREADS_THAN_BUFFER:
hipEventRecord(begin,0);
hipLaunchKernelGGL(( TwoOpt<MORE_THREADS_THAN_BUFFER,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities);
CudaTest("Kernel Call");
hipEventRecord(end,0);
hipEventSynchronize(end);
break;
case EQUAL_SIZE:
hipEventRecord(begin,0);
hipLaunchKernelGGL(( TwoOpt<EQUAL_SIZE,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities);
CudaTest("Kernel Call");
hipEventRecord(end,0);
hipEventSynchronize(end);
break;
case MORE_BUFFER_THAN_THREADS:
hipEventRecord(begin,0);
hipLaunchKernelGGL(( TwoOpt<MORE_BUFFER_THAN_THREADS,TileSize>), dim3(Blocks),dim3(Threads), 0, 0, Restarts,Pos_d,Cities);
CudaTest("Kernel Call");
hipEventRecord(end,0);
hipEventSynchronize(end);
break;
};
hipProfilerStop();
hipEventElapsedTime(&time,begin,end);
hipEventDestroy(begin);
hipEventDestroy(end);
return time;
}
//
// Choose the parameters.
//
// Compile-time walk from i down to 0 in steps of 32; instantiates the
// kernel wrapper for whichever TileSize matches the runtime request.
//
template<int p, int i>
class Recur {
public:
static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) {
if( i == TileSize ) {
return _wrapStatus<i>( Restarts , Threads , Pos , Cities );
}else {
return Recur<p,i-32>::recur( Cities , Pos , Restarts , Threads , TileSize );
}
}
};
//
// Default (base case): no instantiation matched the requested TileSize,
// so pick one from the device's shared-memory capacity.
//
template<int p>
class Recur<p,0> {
public:
static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props,0);
// NOTE(review): despite the name this is an element count (how many
// x/y/w tile entries fit in half the shared memory), not bytes
int sharedMemBytes = getMaxSharedMemory( props.major ) / (2 * (sizeof(int) + 2 * sizeof(float)));
if( sharedMemBytes < 1344 && sharedMemBytes >= 1024 ) {
return _wrapStatus<1024>(Restarts,Threads,Pos,Cities);
} else if( sharedMemBytes < 2048 && sharedMemBytes >= 1344 ) {
return _wrapStatus<1344>(Restarts,Threads,Pos,Cities);
}else if( sharedMemBytes >= 2048 ) {
return _wrapStatus<2048>(Restarts,Threads,Pos,Cities);
}else {
std::cout << "Invalid TileSize = " << TileSize << std::endl;
exit(-1);
}
return -1;
}
};
//
// Auto-generate templates so I don't have to.
//
// Walks each possible TileSize from 1024 down to 0 in steps of 32 (see
// Recur); falls back to a device-derived TileSize if none matches.
//
// @return - Kernel execution time in milliseconds
float
RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) {
return Recur<1024,1024>::recur( Cities , Pos , Restarts , Threads , TileSize );
}
//
// Main entry point to program.
//
//
// argv[0] - program name
// argv[1] - input file
// argv[2] - restarts
// argv[3] - threads (optional; rounded up to a multiple of 32, capped at 1024)
// argv[4] - tile size (optional; rounded up to a multiple of 32, capped at 2048)
//
int
main(int argc, char *argv[]) {
if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);}
const int Restarts = atoi(argv[2]);
if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);}
Data *pos_d;
const int Cities = readInput(argv[1], &pos_d); // Load data to GPU
printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]);
// Make sure we are a multiple of 32 and less than 1024
const int Threads = (argc >= 4) ? min(1024,next32(atoi(argv[3]))) : min(1024,next32(Cities));
// How big is our shared memory
const int TileSize = (argc >= 5) ? min( next32(atoi(argv[4])),2048) : Threads;
// Run the kernel
const float time = RunKernel(Cities,pos_d,Restarts,Threads,TileSize);
// Synchronize (just in case)
hipDeviceSynchronize();
// how long it took
int hours = (int)(time / (3600.0f * 1000.0f));
int seconds = (int)(time/1000) % 60;
int minutes = (int)((time/1000) / 60) % 60;
// Grab the results
int climbs;
// BUGFIX: best_d is a float symbol; it was copied into an int, which
// reinterpreted the float's bit pattern as an integer.
float best;
hipMemcpyFromSymbol(&climbs,climbs_d,sizeof(int),0,hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&best,best_d,sizeof(float),0,hipMemcpyDeviceToHost);
#if DEBUG
#if MICRO
long long pd,cd,ld;
// BUGFIX: copy the accumulated scalar totals (d_*Duration) with their full
// 8-byte size; the old code copied 4 bytes of the per-block arrays instead.
hipMemcpyFromSymbol(&pd,d_pDuration,sizeof(long long),0,hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&cd,d_cDuration,sizeof(long long),0,hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&ld,d_lDuration,sizeof(long long),0,hipMemcpyDeviceToHost);
#else
long long sd,ud;
hipMemcpyFromSymbol(&sd,d_sDuration,sizeof(long long),0,hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&ud,d_uDuration,sizeof(long long),0,hipMemcpyDeviceToHost);
#endif
#endif
// Output
long long moves = 1LL * climbs * (Cities - 2) * (Cities - 1) / 2;
std::cout << "Number of two-opts " << climbs << std::endl;
std::cout << moves * 0.000001 / time << "Gmoves/s" << std::endl;
std::cout << "best found tour length = " << best << std::endl;
std::cout << "Total Time : " << time / 1000.0f << "s" << std::endl;
std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(time) % 1000 << std::endl;
#if DEBUG
#if MICRO
std::cout << "Propagate: " << pd << std::endl;
std::cout << "Load: " << ld << std::endl;
std::cout << "Compute: " << cd << std::endl;
#else
std::cout << "Single: " << sd << std::endl;
std::cout << "Update: " << ud << std::endl;
#endif
#endif
// BUGFIX: free device memory before destroying the context; the old order
// called hipFree() after hipDeviceReset().
hipFree(pos_d);
hipDeviceReset();
return 0;
}
| 21997ab2a37e8658f6729ca2c19451bced16dc02.cu |
// C++
#include <iostream>
#include <string>
// C
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
// CUDA
#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_profiler_api.h>
// Force -Wall after this point, VC only (Check https://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html for GCC)
#pragma warning(push,4)
#define DEBUG 0
#define MICRO 0
#define MACRO 0
#define QUEUE 0
// The runtime provides no float atomicMin; emulate one with a
// compare-and-swap loop on the value's 32-bit integer image.
// Returns the value stored at *address before this call.
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
// Software atomicAdd for float via a 32-bit compare-and-swap loop.
//
// BUGFIX: the previous version reinterpreted the float* as an
// unsigned long long* and performed the CAS on 8 bytes, reading and
// writing 4 bytes past the 4-byte float object (corrupting its neighbor)
// and doing the arithmetic in a bogus double reinterpretation.  A float
// add must CAS on the 32-bit image of the value.
//
// @address - Location to accumulate into
// @val - Amount to add
//
// @return - The value stored at *address before this add
__device__ float atomicAdd(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__float_as_int(val + __int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#if QUEUE
const int MaxBlocks = 10000;   // oversubscription floor for queue mode
const int SliceSize = 100;     // restarts handed out per queue grab
#endif
/******************************************************************************/
/*** 2-opt with random restarts ***********************************************/
/******************************************************************************/
// Euclidean distance between path slots a and b (expects a local `pos`)
#define dist(a, b) (sqrtf((pos[a].x - pos[b].x) * (pos[a].x - pos[b].x)  +  (pos[a].y - pos[b].y) * (pos[a].y - pos[b].y)))
#define swap(a, b) {float tmp = a;  a = b;  b = tmp;}
// Global result accumulators, merged into by every block in cleanup()
static __device__ int climbs_d = 0;
static __device__ float best_d = INT_MAX;
#if QUEUE
static __device__ int restart_d = 0;   // work-queue restart counter
#endif
// Buffer space, used for cache and maximum propagation
#if DEBUG
#if MICRO
// Grand totals plus per-block cycle counters for the fine-grained probes
static __device__ unsigned long long int d_lDuration = 0;
static __device__ unsigned long long int d_cDuration = 0;
static __device__ unsigned long long int d_pDuration = 0;
static __device__ long long int load_duration[128] = {0};
static __device__ long long int compute_duration[128] = {0};
static __device__ long long int propagate_duration[128] = {0};
#endif
#if MACRO
// Grand totals plus per-block cycle counters for the coarse probes
static __device__ unsigned long long int d_uDuration = 0;
static __device__ unsigned long long int d_sDuration = 0;
static __device__ long long int update_duration[128] = {0};
static __device__ long long int single_duration[128] = {0};
#endif
#endif
// Instrumentation: run the given probe only in DEBUG builds
#define LOG( X ) { if( DEBUG ) {X();} }
// --- Per-block timing instrumentation -------------------------------------
// Each *_start()/*_end() pair accumulates elapsed clock64() cycles into a
// per-block duration counter.  Only thread 0 of each block records, so the
// values are per-block estimates, not per-thread sums.  MICRO gates the
// fine-grained probes (load/compute/propagate); MACRO gates the coarse
// ones (single iteration / update).

// Start of the shared-memory load phase.
static __device__ void inline load_start() {
#if MICRO
if(threadIdx.x == 0) {load_duration[blockIdx.x] -= clock64();}
#endif
}
// End of the shared-memory load phase.
static __device__ void inline load_end() {
#if MICRO
if(threadIdx.x == 0) {load_duration[blockIdx.x] += clock64();}
#endif
}
// Start of the compute phase.
static __device__ void inline compute_start() {
#if MICRO
if(threadIdx.x == 0) {compute_duration[blockIdx.x] -= clock64();}
#endif
}
// End of the compute phase.
static __device__ void inline compute_end() {
#if MICRO
if(threadIdx.x == 0) {compute_duration[blockIdx.x] += clock64();}
#endif
}
// Start of the reduction/propagation phase.
static __device__ void inline propagate_start() {
#if MICRO
if(threadIdx.x == 0) {propagate_duration[blockIdx.x] -= clock64();}
#endif
}
// End of the reduction/propagation phase.
static __device__ void inline propagate_end() {
#if MICRO /* BUGFIX: was "#if sMICRO" (typo) which silently disabled this probe */
if(threadIdx.x == 0) {propagate_duration[blockIdx.x] += clock64();}
#endif
}
// Start of one singleIter() pass.
static __device__ void inline single_start() {
#if MACRO
if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] -= clock64();}
#endif
}
// End of one singleIter() pass.
static __device__ void inline single_end() {
#if MACRO
if(threadIdx.x == 0 && DEBUG) {single_duration[blockIdx.x] += clock64();}
#endif
}
// Start of one update() pass.
static __device__ void inline update_start() {
#if MACRO
if(threadIdx.x == 0) {update_duration[blockIdx.x] -= clock64();}
#endif
}
// End of one update() pass.
static __device__ void inline update_end() {
#if MACRO
if(threadIdx.x == 0) {update_duration[blockIdx.x] += clock64();}
#endif
}
// Relationship between the block's thread count and the shared-memory tile
// size; selects which caching/reduction strategy the kernel variants use.
enum ThreadBufferStatus {MORE_THREADS_THAN_BUFFER,EQUAL_SIZE,MORE_BUFFER_THAN_THREADS};
// Data structure used to hold position along path.
// 8-byte alignment lets one Data move as a single 64-bit load/store.
struct __align__(8) Data {
float x,y;
};
#if QUEUE
//
// Grabs the next slice of restarts from the global work queue.
//
// Thread 0 atomically advances restart_d by SliceSize and publishes the
// previous value to the whole block through w_buffer[0].
//
// Contains __syncthreads(), so it must be called by all threads of the
// block (never from divergent branches).
//
// NOTE(review): the int counter is round-tripped through a float buffer;
// exact only while the counter stays within float's 24-bit integer range.
//
// @return - The starting restart index of this block's slice
//
static __device__ inline int
nextSlice(float* __restrict__ w_buffer) {
if(threadIdx.x==0) {
w_buffer[0] = atomicAdd(&restart_d, SliceSize);
}__syncthreads();
return w_buffer[0];
}
#endif
// Allocates and initializes my global memory and shared memory.
//
// Thread 0 allocates the path and weight arrays on the device heap and the
// pointers are published to the rest of the block through shared memory.
// All threads then cooperatively copy the input points.
//
// @pos_d - The input points (read only)
// @pos - Out: heap array of cities+1 points (slot [cities] is filled later
//        by permute() to close the tour)
// @weight - Out: heap array of cities edge weights
// @cities - The amount of points in our graph
//
// @return - Returns true if initialization was successful, false otherwise.
//
template <int TileSize>
static inline __device__ bool
initMemory(const Data* &pos_d, Data* &pos, float* &weight, const int &cities) {
// Shared memory is required to share the allocated memory pointers
__shared__ Data *d;
__shared__ float *w;
if(threadIdx.x == 0 ) {
d = new Data[cities + 1];
if( d != NULL ) {
w = new float[cities];
if( w == NULL ) { // weight allocation failed: roll back the point array
delete[] d;
d = NULL;
}
}
}__syncthreads();
if(d == NULL) { // every thread sees the shared failure flag
return false;
}
// Save new memory locations
pos = d;
weight = w;
for (int i = threadIdx.x; i < cities; i += blockDim.x) pos[i] = pos_d[i];
__syncthreads();
return true;
}
//
// Block-wide reduction: each thread submits a float and the smallest value
// seen by any thread is left in w_buffer[0] for everyone to read.
//
// NOTE(review): despite the name "maximum", every comparison keeps the
// *smaller* value (tour improvements are negative), and the declared
// return type is int, so the returned float is truncated; callers that
// need the exact value read w_buffer[0] directly.
//
// Contains __syncthreads(); must be called by all threads of the block.
//
// @t_val - The number that the thread submits as a candidate
// @cities - The number of cities (bounds the live region of the buffer)
// @w_buffer - Shared scratch buffer of at least min(TileSize,blockDim.x) floats
//
// @return - The reduced value seen from all threads (truncated to int)
template <ThreadBufferStatus Status, int TileSize>
static inline __device__ int
maximum(float t_val, const int &cities, float* __restrict__ &w_buffer) {
LOG( propagate_start );
int upper = min(blockDim.x,min(TileSize,cities));
// We have to condense things down
if(Status == MORE_THREADS_THAN_BUFFER) {
// Compute your index and then try to shove what you have in the buffer
const int Index = threadIdx.x % TileSize;
w_buffer[Index] = t_val;
__syncthreads();
// Now try to win (someone will win)
for(int i = 0 ; i <= (blockDim.x /TileSize); ++i ) {
if(t_val < w_buffer[Index]) {
w_buffer[Index] = t_val;
}
}
}else { // Otherwise we have more than enough room!
w_buffer[threadIdx.x] = t_val;
}__syncthreads();
#pragma unroll 4
for( int i = 512; i > 32 ; i /= 2 ) {
if (TileSize > i && blockDim.x > i) {
int offset = (upper + 1) / 2;
if( threadIdx.x < offset) {
float tmp = w_buffer[threadIdx.x + offset];
if(tmp < t_val) {
w_buffer[threadIdx.x] = t_val = tmp;
}
}__syncthreads();
upper = offset;
}
}
// 64 and down: a single warp finishes the reduction
if(threadIdx.x < 32) {
// Yes. upper = 32. w_buffer[tid] = t_val = min(t_val,w_buffer[threadIdx.x + 16]
if(TileSize > 32 && blockDim.x > 32) {
float tmp = w_buffer[threadIdx.x + (upper+1)/2];
if(tmp < t_val) {
w_buffer[threadIdx.x] = t_val = tmp;
}
}
for( int i = 16; i > 0; i = i / 2 ) {
if(threadIdx.x < i) {
float tmp = w_buffer[threadIdx.x + i];
if(tmp < t_val) {
w_buffer[threadIdx.x] = t_val = tmp;
}
}
}
}__syncthreads();
LOG( propagate_end );
return w_buffer[0];
}
//
// After we find the best positions to reconnect we need to reverse the
// path between them.
//
// Threads enter with strided start/end pairs (see update()); each thread
// swaps its mirrored pair of positions and the matching pair of interior
// edge weights.  weight[k] is the edge between pos[k] and pos[k+1], which
// is why the weight partner index is end-1, not end.
//
// @start - The first position in the sub-path we have to swap with the end
// @end - The last position in the path we have to swap with the start
// @pos - The positions in our path
// @weights - The edge weights between points
//
// TODO: Is it better to reverse the weight or just recompute it?
//
static inline __device__ void
reverse(int start, int end, Data* &pos, float* &weight) {
while(start<end) {
float w = weight[start];
Data d = pos[start];
weight[start] = weight[end-1];
pos[start] = pos[end];
weight[end-1] = w;
pos[end] = d;
start += blockDim.x;
end -= blockDim.x;
}__syncthreads();
}
//
// Perform a single iteration of Two-Opt.
//
// Each thread records its best candidate reversal in the three in/out
// reference parameters; update() later reduces those across the block.
// Contains __syncthreads(); call from all threads of the block.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path (negated distances)
// @minchange - In/out: the best change this thread has found
// @mini - Out: the ith city in the path that is part of the swap
// @minj - Out: the jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
// @x_buffer/@y_buffer/@w_buffer - Shared tiles caching TileSize positions/weights
template <int TileSize>
static __device__ void
singleIter(Data* &pos, float* &weight, float &minchange, int &mini, int &minj, const int &cities, float* __restrict__ x_buffer, float* __restrict__ y_buffer, float* __restrict__ w_buffer) {
LOG( single_start );
//
// The tour is divided into segments. Each segment has a length of
// the number of threads, except possibly the last one.
//
// We traverse through the segments. When we are in a segment each
// city in the segment of the tour is given to a thread. Then we
// begin scanning each city from the end of the tour until we reach
// the current city. Later threads will terminate this process earlier
// than earlier threads.
//
// During each scan we will evaluate if it is better to reverse the path
// between the two cities. If so we check to see if that is better than
// any other possible reversal we have seen.
//
// After we have done this for all segments then we call update. Update
// make some modification to the tour given the set of best reversals
// seen by each thread.
//
//
for (int leading = 0; leading < cities - 2; leading += blockDim.x) {
int i = leading + threadIdx.x;
float pxi0, pyi0, pxi1, pyi1, pxj1, pyj1;
if (i < cities - 2) {
minchange -= weight[i];
pxi0 = pos[i].x;
pyi0 = pos[i].y;
pxi1 = pos[i+1].x;
pyi1 = pos[i+1].y;
pxj1 = pos[0].x;
pyj1 = pos[0].y;
}
for (int trailing = cities - 1; trailing >= leading + 2; trailing -= TileSize) {
int bound = trailing - TileSize + 1; // The lower bound on what we can load
//
// Load the shared memory cache
//
// Each thread will try to load adjacent elements
//
LOG( load_start );
for(int k = threadIdx.x; k < TileSize; k += blockDim.x) {
int cache_idx = k + bound;
if (cache_idx >= (leading + 2)) { // Never go below the lowest city
x_buffer[k] = pos[cache_idx].x;
y_buffer[k] = pos[cache_idx].y;
w_buffer[k] = weight[cache_idx];
}
}__syncthreads();
LOG( load_end );
LOG( compute_start );
// Compute the lower bound that we can see
int lower = bound;
if (lower < i + 2) lower = i + 2;
// Go over loaded cache that everyone will use
for (int current = trailing; current >= lower; current--) {
int cache_idx = current - bound;
float pxj0 = x_buffer[cache_idx];
float pyj0 = y_buffer[cache_idx];
float change = w_buffer[cache_idx]
+ (sqrtf((pxi0 - pxj0) * (pxi0 - pxj0) + (pyi0 - pyj0) * (pyi0 - pyj0)))
+ (sqrtf((pxi1 - pxj1) * (pxi1 - pxj1) + (pyi1 - pyj1) * (pyi1 - pyj1)));
// Shift down
pxj1 = pxj0;
pyj1 = pyj0;
// If better save it and where we found it
if (minchange > change) {
minchange = change;
mini = i;
minj = current;
}
}__syncthreads();
LOG( compute_end );
}
if (i < cities - 2) {
minchange += weight[i];
}
}
LOG( single_end );
}
//
// Perform the swaps to the edges i and j to decrease the total length of our
// path and update the weight and pos arrays appropriately.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @minchange - The current best change we can make
// @mini - The ith city in the path that is part of the swap
// @minj - The jth city in the path that is part of the swap
// @cities - The number of cities along the path (excluding the end point)
template <ThreadBufferStatus Status, int TileSize>
static __device__ bool
update(Data* &pos, float* &weight, float &minchange, int &mini, int &minj, const int &cities, float* __restrict__ w_buffer) {
	LOG( update_start );
	// For all threads, find the best change
	// (maximum() -- defined elsewhere -- is expected to leave the block-wide
	// best value in w_buffer[0])
	maximum<Status,TileSize>(minchange, cities, w_buffer);
	// If we don't have one, oh well.
	if(w_buffer[0] >= 0) {
		LOG( update_end );
		return false;
	}
	// While we have an update
	while(w_buffer[0] < 0 ) {
		// If we have multiple bests, pick one (last writer wins; the thread id
		// is stored in a float slot, which is exact for any valid blockDim)
		if (minchange == w_buffer[0]) {
			w_buffer[1] = threadIdx.x;
		}__syncthreads();
		// The winning thread publishes which indices to swap
		if(threadIdx.x==w_buffer[1]) {
			w_buffer[2] = mini;
			w_buffer[3] = minj;
		}__syncthreads();
		// Give them to each thread
		int mi = (int)w_buffer[2];
		int mj = (int)w_buffer[3];
		// If this thread's own candidate move overlaps the segment about to be
		// reversed, it is invalidated by the reversal -- drop it.
		if(!(minj < (mi - 1)) && !(mini > (mj + 1))) {
			minchange = 0;
		}
		// Reverse the path between the nodes selected (cooperative: every
		// thread gets its own swap pair, striding inward -- see reverse())
		reverse(mi+1+threadIdx.x,mj-threadIdx.x,pos,weight);
		// Fix connecting edges weights for the endpoints
		// (weights are stored negated -- see permute())
		weight[mi] = -dist(mi,mi+1);
		weight[mj] = -dist(mj,mj+1);
		__syncthreads(); // Wait for everyone
		// Get the next best
		maximum<Status,TileSize>(minchange, cities, w_buffer);
	}
	LOG( update_end );
	return true;
}
//
// Given a path we randomly permute it into a new new path and then initialize the weights of the path.
//
// @pos - The current Hamiltonian path
// @weight - The current weight of our edges along the path
// @cities - The number of cities along the path (excluding the end point)
static __device__ inline void
permute(Data* &pos, float* &weight, const int &cities, curandState &rndstate) {
	if (threadIdx.x == 0) { // serial permutation
		// NOTE(review): j is drawn from the full 1..cities-1 range on every
		// step, so this is a random-swap shuffle, not Fisher-Yates; the
		// resulting permutation is not uniformly distributed -- presumably
		// acceptable for randomized restarts, but worth confirming.
		for (int i = 1; i < cities; i++) {
			int j = curand(&rndstate) % (cities - 1) + 1;
			Data d = pos[i];
			pos[i] = pos[j];
			pos[j] = d;
		}
		// Close the tour: pos[cities] is a sentinel copy of the start city,
		// which singleIter relies on when seeding its trailing scan.
		pos[cities] = pos[0];
	}__syncthreads();
	// weight[i] = -(distance between city i and i+1); weights are stored
	// negated throughout (see singleIter/update).
	for (int i = threadIdx.x; i < cities; i += blockDim.x) weight[i] = -dist(i, i + 1);
	__syncthreads();
}
//
// Releases memory and saves results
//
// @pos - Pointer to allocated path memory
// @weight - Pointer to allocated edge weight memory
// @local_climbs - The number of climbs performed by this block
// @best_length - The best length this block found.
static __device__ inline void
cleanup(Data* &pos, float* &weight, int &local_climbs, float &best_length) {
	if (threadIdx.x == 0) {
		// Save data: fold this block's stats into the global counters.
		atomicAdd(&climbs_d,local_climbs);
		// NOTE(review): atomicMin on a float is not a built-in CUDA atomic;
		// this relies on an overload defined elsewhere in the file -- confirm.
		atomicMin(&best_d, best_length);
		// Release device-heap memory allocated by initMemory (not visible here).
		// NOTE(review): if initMemory uses new[] these should be delete[].
		delete pos;
		delete weight;
#if DEBUG
#if MICRO
		atomicAdd(&d_lDuration,load_duration[blockIdx.x]);
		atomicAdd(&d_cDuration,compute_duration[blockIdx.x]);
		atomicAdd(&d_pDuration,propagate_duration[blockIdx.x]);
#endif
#if MACRO
		atomicAdd(&d_uDuration,update_duration[blockIdx.x]);
		atomicAdd(&d_sDuration,single_duration[blockIdx.x]);
#endif
#endif
	}
}
//
// Perform iterative two-opt until there can be no more swaps to reduce the path length.
//
// @pos_d - The position of each point in the graph.
// @cities - The number of vertices in the graph
template <ThreadBufferStatus Status, int TileSize>
static __global__ __launch_bounds__(1024, 2) void
TwoOpt(const int Restarts, const Data *pos_d, const int cities) {
	Data* pos;       // this block's private copy of the tour (device heap)
	float* weight;   // negated edge lengths along the tour
	int local_climbs = 0;
	float best_length = INT_MAX;
	curandState rndstate;
	//curand_init(blockIdx.x , 0, 0, &rndstate);
	// Shared tiles for singleIter's trailing-city cache plus the scratch
	// buffer used by update()/maximum().
	// NOTE(review): w_buffer is declared int but is passed where singleIter
	// and update take float*, and minchange below is an int passed as float&.
	// As written these look like type mismatches -- confirm against the full
	// original source.
	__shared__ float x_buffer[TileSize];
	__shared__ float y_buffer[TileSize];
	__shared__ int w_buffer[TileSize];
	// Initialize the memory, if cannot then output error and exit
	if( !initMemory<TileSize>(pos_d,pos,weight,cities) ) {
		if(threadIdx.x == 0) {
			printf("Memory initialization error for block %d\n", blockIdx.x);
		}
		return;
	}
#if QUEUE
	for(int slice = nextSlice(w_buffer) ; slice < Restarts; slice = nextSlice(w_buffer)) { // get smaller blocks
		for( int r = slice ; r < slice + SliceSize && r < Restarts ; ++r ) {
#else
	for(int r = blockIdx.x; r < Restarts; r += gridDim.x) { // even blocks
#endif
		// Re-seed periodically so restarts don't replay the same sequence.
		if( local_climbs % 10 == 0 ) {
			curand_init( blockIdx.x + gridDim.x * local_climbs , 0, 0, &rndstate);
		}
		int mini,minj,minchange;
		// Give our current path we need to permute it
		permute(pos,weight,cities,rndstate);
		// Keep applying two-opt until we reach some local
		// (or global) minimum on the length
		do {
			++local_climbs;
			minchange = mini = minj = 0;
			singleIter<TileSize>(pos, weight, minchange, mini, minj, cities, x_buffer, y_buffer, w_buffer);
		} while (update<Status,TileSize>(pos, weight, minchange, mini, minj, cities, w_buffer));
		// Calculate the length of the path: block-wide sum into w_buffer[0]
		w_buffer[0] = 0;
		__syncthreads();
		float term = 0;
		for (int i = threadIdx.x; i < cities; i += blockDim.x) {
			term += dist(i, i + 1);
		}
		atomicAdd(w_buffer,term);
		__syncthreads();
		// If better then save it to my local best
		if(threadIdx.x == 0) {
			if(w_buffer[0] < best_length) {
				best_length = w_buffer[0];
			}
		}
#if QUEUE
	}
#endif
	}
	// Release all my resources, and save the best seen
	// with any other statistics
	cleanup(pos, weight, local_climbs, best_length);
}
//
// Checks to see if an error occurred with CUDA and if so prints out the message passed and the CUDA
// error then quits the application.
//
// @msg - Message to print out if error occurs
// Synchronize with the device and, if any CUDA error is pending, print `msg`
// plus the CUDA error string and terminate the process.
//
// @msg - Message to print out if an error occurred
static void
CudaTest(const char *msg) { // const char*: callers pass string literals
	cudaError_t e;
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// drop-in replacement (already used elsewhere in this file).
	cudaDeviceSynchronize();
	if (cudaSuccess != (e = cudaGetLastError())) {
		fprintf(stderr, "%s: %d\n", msg, e);
		fprintf(stderr, "%s\n", cudaGetErrorString(e));
		system("PAUSE"); // Windows-style pause, kept for behavioral parity
		exit(-1);
	}
}
// Allocate `size` bytes of device memory into `addr`; on failure print a
// message, then always run CudaTest (which aborts on any pending error).
// NOTE(review): the trailing CudaTest call is OUTSIDE the if -- these macros
// expand to two statements, so using them in an unbraced if/else misparses.
#define mallocOnGPU(addr, size) if (cudaSuccess != cudaMalloc((void **)&addr, size)) fprintf(stderr, "could not allocate GPU memory\n"); CudaTest("couldn't allocate GPU memory");
// Synchronously copy `size` bytes host->device; same two-statement caveat.
#define copyToGPU(to, from, size) if (cudaSuccess != cudaMemcpy(to, from, size, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of data to device failed\n"); CudaTest("data copy to device failed");
//
// Read TPS lib files into GPU memory. ATT and CEIL_2D edge weight types are not supported
//
// @fname - The name of the file to read the TSP data from
// @pos_d - Pointer to the pointer that will hold data on GPU
// and is modified here to be the address on the GPU
//
// @return - Returns the number of cities found
static int
readInput(const char *fname, Data **pos_d) {
	int ch, cnt, in1, cities;
	float in2, in3;
	FILE *f;
	Data *pos;
	char str[256];
	f = fopen(fname, "rt");
	if (f == NULL) {fprintf(stderr, "could not open file %s\n", fname); exit(-1);}
	// Skip the first three header lines, then scan to the ':' of DIMENSION.
	ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
	ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
	ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
	ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
	fscanf(f, "%255s\n", str); // width-limited: bare %s could overrun str[256]
	cities = atoi(str);
	if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
	pos = new Data[cities]; if (pos == NULL) {fprintf(stderr, "cannot allocate pos\n"); exit(-1);}
	ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
	fscanf(f, "%255s\n", str);
	if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
	cnt = 0;
	// == 3 (all fields matched): the old truthiness test also accepted
	// EOF (-1), which could spin forever on a file missing its "EOF" marker.
	while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3) == 3) {
		// Bounds check BEFORE writing: the old code wrote pos[cities] (one
		// past the end) before noticing the input was too long.
		if (cnt >= cities) {fprintf(stderr, "input too long\n"); exit(-1);}
		pos[cnt].x = in2;
		pos[cnt].y = in3;
		++cnt;
		if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
	}
	if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
	fscanf(f, "%255s", str);
	if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
	mallocOnGPU(*pos_d, sizeof(Data) * cities);
	copyToGPU(*pos_d, pos, sizeof(Data) * cities);
	fclose(f);
	delete [] pos; // was `delete (pos)`: scalar delete of a new[] array is UB
	return cities;
}
//
// Given an enum value return it's string representation
//
// @status - The enum value to translate
//
// @return - The enums string representation in the source code
// Translate a ThreadBufferStatus enum value into the identifier it carries
// in the source code.
//
// @status - the enum value to translate
// @return - the matching identifier, or a fallback message for unknown values
static const std::string
getName(const ThreadBufferStatus status) {
	if (status == MORE_THREADS_THAN_BUFFER) {
		return std::string("MORE_THREADS_THAN_BUFFER");
	}
	if (status == EQUAL_SIZE) {
		return std::string("EQUAL_SIZE");
	}
	if (status == MORE_BUFFER_THAN_THREADS) {
		return std::string("MORE_BUFFER_THAN_THREADS");
	}
	return std::string("enum value not found.");
}
// Upper bound on per-SM shared memory (bytes) for a compute-capability
// major version: 16KB below sm_3x, 32KB for sm_3x/4x, 64KB from sm_5x up.
//
// @major - compute capability major version
// @return - shared memory capacity in bytes
int getMaxSharedMemory( int major ) {
	if (major >= 5) {
		return 65536;
	}
	if (major >= 3) {
		return 32768;
	}
	return 16384;
}
//
// Calculates the maximum number of resident blocks that the card can hold
//
// @Threads - Number of threads that each block will have
// @Shared_Bytes - The amount of bytes each block will allocate
//
// @return - Returns the number of blocks the card can have resident
// Estimate how many blocks the device can keep resident given each block's
// shared-memory footprint and thread count, capped by the architecture's
// per-SM block limit.
//
// @Shared_Bytes - bytes of shared memory each block allocates
// @Threads      - threads per block
// @return       - resident-block estimate across all SMs
static int
getMaxBlocks(const int Shared_Bytes, const int Threads) {
	cudaDeviceProp props;
	cudaGetDeviceProperties(&props,0);
	std::cout << "Compute Version " << props.major << "." << props.minor << std::endl;
	// Per-SM architectural limits; defaults cover 5.x and newer.
	int maxShared = 65536;
	int maxResidentBlocks = 32;
	if (props.major < 3) {
		maxShared = 16384;
		maxResidentBlocks = 8;
	} else if (props.major < 5) {
		maxShared = 49152;
		maxResidentBlocks = 16;
	}
	// Tightest of the three per-SM constraints: block cap, shared memory,
	// and the 2048 resident-thread budget.
	const int byThreads = 2048 / Threads;
	const int byShared = maxShared / Shared_Bytes;
	int perSM = maxResidentBlocks;
	if (byShared < perSM) perSM = byShared;
	if (byThreads < perSM) perSM = byThreads;
	int numBlocks = props.multiProcessorCount * perSM;
#if QUEUE
	numBlocks = max(MaxBlocks, numBlocks );
#endif
	return numBlocks;
}
//
// Given an integer returns the next multiple of 32 greater than or equal to it.
//
// @in - The integer to round to next multiple of 32
//
// @return - Returns the next multiple of 32 that is greater than or equals to in
// Round `in` up to the nearest multiple of 32 (the warp size).
//
// @in - value to round (non-negative expected)
// @return - smallest multiple of 32 that is >= in
static int
next32(int in) {
	const int chunks = (in + 31) / 32; // ceiling-divide by the warp size
	return chunks * 32;
}
//
// Handle ThreadBufferStatus kernel selection
//
template <int TileSize>
static float
_wrapStatus(const int Restarts, const int Threads, const Data *Pos_d, const int Cities) {
	// Per-block shared memory: x/y coordinate tiles plus the int scratch buffer.
	const int Shared_Bytes = (sizeof(int) + 2*sizeof(float)) * TileSize;
	// +16: presumably slack for runtime-reserved shared memory -- TODO confirm
	const int Blocks = min(Restarts,getMaxBlocks(Shared_Bytes + 16,Threads));
	const ThreadBufferStatus Status = (Threads > TileSize) ? MORE_THREADS_THAN_BUFFER : (Threads < TileSize) ? MORE_BUFFER_THAN_THREADS : EQUAL_SIZE;
	float time;
	// Device-heap budget, presumably sized for initMemory's per-block
	// in-kernel allocations (pos + weight) -- TODO confirm against initMemory.
	const int Device_Memory = (sizeof(int) + sizeof(Data)) * (Cities + 1)* 2*Blocks;
	cudaDeviceSetLimit(cudaLimitMallocHeapSize, Device_Memory);
	CudaTest("Change heap size");
	// Output runtime configuration
	std::cout << "Blocks = " << Blocks
		<< ", Threads = " << Threads
		<< ", TileSize = " << TileSize
		<< ", Status = " << getName(Status)
		<< ", Shared Bytes = " << Shared_Bytes
		<< ", Device Memory = " << Device_Memory/(1024.0f*1024.0f) << "MB" << std::endl;
#if QUEUE
	std::cout << "SliceSize = " << SliceSize << std::endl;
#endif
	cudaEvent_t begin,end;
	cudaEventCreate(&begin);
	cudaEventCreate(&end);
	cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
	cudaThreadSetCacheConfig( cudaFuncCachePreferShared );
	cudaProfilerStart();
	// Dispatch on the thread/buffer size relationship so the kernel's
	// reduction specializes at compile time; events bracket the kernel only.
	switch(Status) {
		case MORE_THREADS_THAN_BUFFER:
			cudaEventRecord(begin,0);
			TwoOpt<MORE_THREADS_THAN_BUFFER,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities);
			CudaTest("Kernel Call");
			cudaEventRecord(end,0);
			cudaEventSynchronize(end);
			break;
		case EQUAL_SIZE:
			cudaEventRecord(begin,0);
			TwoOpt<EQUAL_SIZE,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities);
			CudaTest("Kernel Call");
			cudaEventRecord(end,0);
			cudaEventSynchronize(end);
			break;
		case MORE_BUFFER_THAN_THREADS:
			cudaEventRecord(begin,0);
			TwoOpt<MORE_BUFFER_THAN_THREADS,TileSize><<<Blocks,Threads>>>(Restarts,Pos_d,Cities);
			CudaTest("Kernel Call");
			cudaEventRecord(end,0);
			cudaEventSynchronize(end);
			break;
	};
	cudaProfilerStop();
	// Elapsed milliseconds between the two events.
	cudaEventElapsedTime(&time,begin,end);
	cudaEventDestroy(begin);
	cudaEventDestroy(end);
	return time;
}
//
// Choose the parameters
//
template<int p, int i>
class Recur {
public:
	// Compile-time descent from i toward 0 in steps of 32: instantiates
	// _wrapStatus<i> for the step that equals the runtime TileSize, otherwise
	// recurses into Recur<p, i-32>. The <p,0> specialization is the fallback.
	static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) {
		if( i == TileSize ) {
			return _wrapStatus<i>( Restarts , Threads , Pos , Cities );
		}else {
			return Recur<p,i-32>::recur( Cities , Pos , Restarts , Threads , TileSize );
		}
	}
};
//
// Default
//
template<int p>
class Recur<p,0> {
public:
	// Base case: no step in the template chain matched TileSize. Pick the
	// largest supported tile the device's shared memory can hold instead.
	static float recur( const int Cities, const Data *Pos, const int Restarts, const int Threads , const int TileSize ) {
		cudaDeviceProp props;
		cudaGetDeviceProperties(&props,0);
		// NOTE(review): despite the name this is a tile ELEMENT count, not
		// bytes -- capacity divided by the per-element footprint
		// (int + 2 floats), halved presumably so two blocks fit per SM
		// (matches __launch_bounds__(1024, 2)) -- TODO confirm.
		int sharedMemBytes = getMaxSharedMemory( props.major ) / (2 * (sizeof(int) + 2 * sizeof(float)));
		if( sharedMemBytes < 1344 && sharedMemBytes >= 1024 ) {
			return _wrapStatus<1024>(Restarts,Threads,Pos,Cities);
		} else if( sharedMemBytes < 2048 && sharedMemBytes >= 1344 ) {
			return _wrapStatus<1344>(Restarts,Threads,Pos,Cities);
		}else if( sharedMemBytes >= 2048 ) {
			return _wrapStatus<2048>(Restarts,Threads,Pos,Cities);
		}else {
			std::cout << "Invalid TileSize = " << TileSize << std::endl;
			exit(-1);
		}
		return -1; // unreachable: every branch above returns or exits
	}
};
//
// Auto-generate templates so I don't have to.
//
// Runs through each possible value form 0 to 1024
//
float
RunKernel(const int Cities, const Data *Pos, const int Restarts, const int Threads, const int TileSize) {
	// Walk the Recur template chain (1024 down to 0, step 32) to select the
	// _wrapStatus instantiation for TileSize; returns kernel time in ms.
	return Recur<1024,1024>::recur( Cities , Pos , Restarts , Threads , TileSize );
}
//
// Main entry point to program.
//
//
// argv[0] - program name
// argv[1] - input file
// argv[2] - restarts
// argv[3] - threads
// argv[4] - shared memory
//
int
main(int argc, char *argv[]) {
	if (argc < 3 || argc > 5) {fprintf(stderr, "\narguments: input_file restart_count <threads> <tilesize> \n"); exit(-1);}
	const int Restarts = atoi(argv[2]);
	if (Restarts < 1) {fprintf(stderr, "restart_count is too small: %d\n", Restarts); exit(-1);}
	Data *pos_d;
	const int Cities = readInput(argv[1], &pos_d); // Load data to GPU
	printf("configuration: %d cities, %d restarts, %s input\n", Cities, Restarts, argv[1]);
	// Make sure we are a multiple of 32 and less than 1024
	const int Threads = (argc >= 4) ? min(1024,next32(atoi(argv[3]))) : min(1024,next32(Cities));
	// How big is our shared memory tile (elements), capped at 2048
	const int TileSize = (argc >= 5) ? min( next32(atoi(argv[4])),2048) : Threads;
	// Run the kernel; returns elapsed milliseconds
	const float time = RunKernel(Cities,pos_d,Restarts,Threads,TileSize);
	// Synchronize (just in case)
	cudaDeviceSynchronize();
	// Break the elapsed milliseconds into h/m/s
	int hours = (int)(time / (3600.0f * 1000.0f));
	int seconds = (int)(time/1000) % 60;
	int minutes = (int)((time/1000) / 60) % 60;
	// Fetch result counters from device globals
	int climbs,best;
	cudaMemcpyFromSymbol(&climbs,climbs_d,sizeof(int),0,cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&best,best_d,sizeof(int),0,cudaMemcpyDeviceToHost);
#if DEBUG
#if MICRO
	long long pd,cd,ld;
	// sizeof(long long): the old sizeof(int) copied only 4 of the 8 bytes,
	// leaving the upper halves of these counters uninitialized.
	cudaMemcpyFromSymbol(&pd,propagate_duration,sizeof(long long),0,cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&cd,compute_duration,sizeof(long long),0,cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&ld,load_duration,sizeof(long long),0,cudaMemcpyDeviceToHost);
#else
	long long sd,ud;
	cudaMemcpyFromSymbol(&sd,single_duration,sizeof(long long),0,cudaMemcpyDeviceToHost);
	cudaMemcpyFromSymbol(&ud,update_duration,sizeof(long long),0,cudaMemcpyDeviceToHost);
#endif
#endif
	// Output: moves = climbs * number of (i,j) pairs examined per climb
	long long moves = 1LL * climbs * (Cities - 2) * (Cities - 1) / 2;
	std::cout << "Number of two-opts " << climbs << std::endl;
	std::cout << moves * 0.000001 / time << "Gmoves/s" << std::endl;
	std::cout << "best found tour length = " << best << std::endl;
	std::cout << "Total Time : " << time / 1000.0f << "s" << std::endl;
	std::cout << "Hours = " << hours << ", Minutes = " << minutes << ", Seconds = " << seconds << ", Milliseconds = " << (int)(time) % 1000 << std::endl;
#if DEBUG
#if MICRO
	std::cout << "Propagate: " << pd << std::endl;
	std::cout << "Load: " << ld << std::endl;
	std::cout << "Compute: " << cd << std::endl;
#else
	std::cout << "Single: " << sd << std::endl;
	std::cout << "Update: " << ud << std::endl;
#endif
#endif
	// Free device memory BEFORE resetting the device: cudaFree on a destroyed
	// context is invalid (the old code reset first, then freed).
	cudaFree(pos_d);
	cudaDeviceReset();
	return 0;
}
|
c5c0c2f7304bf70230e5125c5e03f93062f7f1d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void multMatrix(int *d1_in, int *d2_in, int *d_out, int n, int m, int k){
	// Naive (n x m) * (m x k) integer matrix product: one thread per element
	// of the n x k result. Only threadIdx is used, so this assumes a
	// single-block launch with blockDim covering at least (k, n).
	int indx = threadIdx.x; // output column
	int indy = threadIdx.y; // output row
	int ind = indy*k+indx;
	//printf("%d %d\n",indy,indx);
	// Guard each axis separately: the old `ind < n*k` test let a thread with
	// indx >= k (when blockDim.x > k) pass and alias another row's element.
	if (indx < k && indy < n) {
		// Accumulate in a register instead of read-modify-write on global
		// memory for every term.
		int acc = 0;
		for(int i=0;i<m;i++){
			acc += d1_in[indy*m+i]*d2_in[i*k+indx];
		}
		d_out[ind] = acc;
	}
} | c5c0c2f7304bf70230e5125c5e03f93062f7f1d4.cu | #include "includes.h"
__global__ void multMatrix(int *d1_in, int *d2_in, int *d_out, int n, int m, int k){
	// Naive (n x m) * (m x k) integer matrix product: one thread per element
	// of the n x k result. Only threadIdx is used, so this assumes a
	// single-block launch with blockDim covering at least (k, n).
	int indx = threadIdx.x; // output column
	int indy = threadIdx.y; // output row
	int ind = indy*k+indx;
	//printf("%d %d\n",indy,indx);
	// Guard each axis separately: the old `ind < n*k` test let a thread with
	// indx >= k (when blockDim.x > k) pass and alias another row's element.
	if (indx < k && indy < n) {
		// Accumulate in a register instead of read-modify-write on global
		// memory for every term.
		int acc = 0;
		for(int i=0;i<m;i++){
			acc += d1_in[indy*m+i]*d2_in[i*k+indx];
		}
		d_out[ind] = acc;
	}
} |
c79a3a7b29c35d37af10bc29032c3cbf44e70673.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define CUDA_CHECK_ERROR
#define CudaSafeCall(err) __CudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError() __CudaCheckError(__FILE__, __LINE__)
// im2col-style transform: one thread per pixel of a width x width image.
// For each channel, copies the 3x3 neighbourhood around the pixel into that
// pixel's row of `input`, then sets the row's last element to 1 (bias term).
// NOTE(review): there is no bounds guard on thread_id -- assumes the launch
// covers exactly width*width threads; confirm against the caller.
__global__ void transform_image(float *input, const float *raw_input, const int width, const int channels)
{
	int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
	int start_i = thread_id / width - 1;  // top row of the 3x3 window
	int start_j = thread_id % width - 1;  // left column of the 3x3 window
	int per_channel_width = width * width;
	int hidden_width = 3 * 3 * channels + 1;  // 9 taps per channel + bias slot
	int global_offset = thread_id * hidden_width;
	for (int c = 0; c < channels; c++) {
		int offset = 0;
		for (int i = start_i; i < start_i + 3; i++) {
			// skip out-of-bounds rows; NOTE(review): skipped taps leave their
			// output slots untouched, so `input` is presumably zero-initialized
			// by the caller -- confirm.
			if (i < 0 || i == width)
				continue;
			for (int j = start_j; j < start_j + 3; j++) {
				if (j < 0 || j == width)
					continue;
				input[global_offset + c * 9 + offset] = raw_input[c * per_channel_width + i * width + j];
				offset++;
			}
		}
	}
	input[(thread_id + 1) * hidden_width - 1] = 1;
} | c79a3a7b29c35d37af10bc29032c3cbf44e70673.cu | #include "includes.h"
#define CUDA_CHECK_ERROR
#define CudaSafeCall(err) __CudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError() __CudaCheckError(__FILE__, __LINE__)
// im2col-style transform: one thread per pixel of a width x width image.
// For each channel, copies the 3x3 neighbourhood around the pixel into that
// pixel's row of `input`, then sets the row's last element to 1 (bias term).
// NOTE(review): there is no bounds guard on thread_id -- assumes the launch
// covers exactly width*width threads; confirm against the caller.
__global__ void transform_image(float *input, const float *raw_input, const int width, const int channels)
{
	int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
	int start_i = thread_id / width - 1;  // top row of the 3x3 window
	int start_j = thread_id % width - 1;  // left column of the 3x3 window
	int per_channel_width = width * width;
	int hidden_width = 3 * 3 * channels + 1;  // 9 taps per channel + bias slot
	int global_offset = thread_id * hidden_width;
	for (int c = 0; c < channels; c++) {
		int offset = 0;
		for (int i = start_i; i < start_i + 3; i++) {
			// skip out-of-bounds rows; NOTE(review): skipped taps leave their
			// output slots untouched, so `input` is presumably zero-initialized
			// by the caller -- confirm.
			if (i < 0 || i == width)
				continue;
			for (int j = start_j; j < start_j + 3; j++) {
				if (j < 0 || j == width)
					continue;
				input[global_offset + c * 9 + offset] = raw_input[c * per_channel_width + i * width + j];
				offset++;
			}
		}
	}
	input[(thread_id + 1) * hidden_width - 1] = 1;
} |
6aa8771ee0896fe727dbf10dd6ef0da0f1959163.hip | // !!! This is a file automatically generated by hipify!!!
#include "hipfft.h"
#include "cutil.h"
#include "mex.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <memcpy.cu>
#include <cubicPrefilter2D.cu>
#include <cubicTex2D.cu>
#define IMUL(a, b) __umul24(a, b)
//////////////////////////////////////////////////////////////////////////////////////
/// Interpolation Kernel
//////////////////////////////////////////////////////////////////////////////////////
texture<float, 2, hipReadModeElementType> texInput;
// One thread per output sample: fetch texInput at the coordinate given in
// points[loc], using the interpolation method selected by _option
// (0 nearest / 1 linear via the texture filter mode set host-side,
//  2-3 bicubic via cubicTex2D; anything else falls back to tex2D).
__global__ void interpTex( float2 *points, float *output, int output_numel, int output_width, int _option){
	const int x = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
	const int y = IMUL(blockDim.y, blockIdx.y) + threadIdx.y;
	const int loc = IMUL(y,output_width) + x;
	if (loc < output_numel) {
		switch(_option){
			case 0: // nearest: host sets texInput.filterMode to point for this option
				output[loc] = tex2D(texInput, points[loc].x, points[loc].y);
				break;
			case 1: // linear: hardware bilinear filtering
				output[loc] = tex2D(texInput, points[loc].x, points[loc].y);
				break;
			case 2: // spline: bicubic reconstruction
				output[loc] = cubicTex2D(texInput, points[loc].x, points[loc].y);
				break;
			case 3: // 'cubic' is rejected host-side; kept as a cubicTex2D fallback
				output[loc] = cubicTex2D(texInput, points[loc].x, points[loc].y);
				break;
			default :
				output[loc] = tex2D(texInput, points[loc].x, points[loc].y);
				break;
		}
	}
}
//Round a / b to nearest higher integer value
// Ceiling of a/b for integer operands: truncating quotient plus a carry
// whenever a remainder exists.
int iDivUp(int a, int b){
	const int quotient = a / b;
	return (a % b) ? quotient + 1 : quotient;
}
//////////////////////////////////////////////////////////////////////////////////////
/// Main
//////////////////////////////////////////////////////////////////////////////////////
// MEX entry point: interp2cuda(W, Xi, Yi, [method]).
// Uploads W into a 2D texture, samples it at the (Xi, Yi) coordinates on the
// GPU with the selected interpolation method, and returns the result.
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) {
	// Check number of inputs
	if (nrhs <3) mexErrMsgTxt("Must have three input arguments: data, rowpoints, colpoints");
	if (nlhs !=1) mexErrMsgTxt("Must have one output argument");
	if (nrhs > 4) mexErrMsgTxt("This format is not supported, usage : interp2cuda(W,Xi,Yi,method), method is optional ");
	// Check the class of input data
	if ( mxIsComplex(prhs[0]) || !mxIsClass(prhs[0],"double") ) {
		// try casting the input to double?
		mexErrMsgTxt("Input must be real, double !");
	}
	///// Allocate, set up data structures
	int OUTPUT_W, OUTPUT_H, OUTPUT_SIZE, INPUT_W, INPUT_H, INPUT_SIZE, POINTS_SIZE ;
	int interpo_option;
	double *input = mxGetPr(prhs[0]);
	INPUT_W = mxGetN(prhs[0]);
	INPUT_H = mxGetM(prhs[0]);
	INPUT_SIZE = INPUT_H * INPUT_W * sizeof(float);
	float *f_input;
	float2 *f_points;
	///// Check if we're in "input points" or "number of points" mode
	if ( mxGetNumberOfElements(prhs[1]) == 1 && mxGetNumberOfElements(prhs[2]) == 1) {
		mexErrMsgTxt("This mode is not yet implemented !! usage interp2(Z,Xi,Yi,method)");
		// double *row_points = mxGetPr(prhs[1]);
		// double *col_points = mxGetPr(prhs[2]);
		// // number of points mode
		// OUTPUT_W = (int) col_points[0];
		// OUTPUT_H = (int) row_points[0];
		// OUTPUT_SIZE = OUTPUT_W * OUTPUT_H * sizeof(float);
		// POINTS_SIZE = 2*OUTPUT_SIZE;
		// // we want N evenly spaced points from 0 to 1
		// f_points = (float2 *)mxMalloc(POINTS_SIZE);
		// for ( int r=0; r<OUTPUT_H; r++ ) {
		// for ( int c=0; c<OUTPUT_W; c++ ) {
		// f_points[c + OUTPUT_W*r].x = (float) c * (INPUT_W-1) / (OUTPUT_W-1) + 0.5f;
		// f_points[c + OUTPUT_W*r].y = (float) r * (INPUT_H-1) / (OUTPUT_H-1) + 0.5f;
		// }
		// }
	}
	else {
		double *x_points = mxGetPr(prhs[1]);
		double *y_points = mxGetPr(prhs[2]);
		int NB_ELEM_Xi,NB_ELEM_Yi;
		// Decode the optional method string by comparing its leading
		// characters against the supported keywords.
		if (nrhs == 4 || nrhs == 7){ // if interpolation option is passed
			mxChar* option = (nrhs == 4) ? mxGetChars(prhs[3]) : mxGetChars(prhs[6]);
			if((char)option[0] == 'n' && (char)option[1] == 'e' && (char)option[2] == 'a' && (char)option[3] == 'r' && (char)option[4] == 'e'
				&& (char)option[5] == 's' && (char)option[6] == 't'){
				interpo_option = 0;
				//mexPrintf("nearest\n");
			}
			else if ((char)option[0] == 'l' && (char)option[1] == 'i' && (char)option[2] == 'n' && (char)option[3] == 'e' && (char)option[4] == 'a'
				&& (char)option[5] == 'r'){
				interpo_option = 1;
				//mexPrintf("linear\n");
			}
			else if((char)option[0] == 's' && (char)option[1] == 'p' && (char)option[2] == 'l' && (char)option[3] == 'i' && (char)option[4] == 'n'
				&& (char)option[5] == 'e'){
				interpo_option = 2;
				//mexPrintf("spline\n");
			}
			else if((char)option[0] == 'c' && (char)option[1] == 'u' && (char)option[2] == 'b' && (char)option[3] == 'i' && (char)option[4] == 'c'){
				interpo_option = 3;
				//mexPrintf("cubic\n");
				mexErrMsgTxt("cubic method is not yet supported !!");
			}
			else{
				mexErrMsgTxt("method is not recognized, you must use 'nearest' 'linear' 'spline' or 'cubic'");
			}
		}
		else{
			interpo_option = 1; // default: linear
			// mexPrintf("linear\n");
		}
		NB_ELEM_Xi = mxGetNumberOfElements(prhs[1]);
		NB_ELEM_Yi = mxGetNumberOfElements(prhs[2]);
		if ( NB_ELEM_Xi != NB_ELEM_Yi){
			mexErrMsgTxt("Xi and Yi must have the same size !!");
		}
		OUTPUT_W = mxGetN(prhs[1]);
		OUTPUT_H = mxGetM(prhs[1]);
		OUTPUT_SIZE = OUTPUT_W * OUTPUT_H * sizeof(float);
		POINTS_SIZE = 2*OUTPUT_SIZE;
		f_points = (float2 *)mxMalloc(POINTS_SIZE);
		// -0.5f shifts from MATLAB's pixel-center convention to texel space.
		for ( int r=0; r<NB_ELEM_Xi; r++ ) {
			f_points[r].x = (float) x_points[r] - 0.5f;
			f_points[r].y = (float) y_points[r] - 0.5f;
		}
	}
	plhs[0] = mxCreateDoubleMatrix(OUTPUT_H, OUTPUT_W, mxREAL);
	double *output = mxGetPr(plhs[0]);
	float *f_output = (float *)mxMalloc(OUTPUT_SIZE);
	///// We need to convert the input array from double to float
	// mxMalloc: this buffer is released below with mxFree; the old code
	// allocated it with plain malloc, mixing allocators.
	f_input = (float *)mxMalloc(INPUT_SIZE);
	// Transpose from MATLAB's column-major layout to row-major for the texture.
	for ( int r=0; r<INPUT_H; r++ ) {
		for ( int c=0; c<INPUT_W; c++ ) {
			f_input[c + INPUT_W*r] = (float) input[r+ (INPUT_H)*(c)];
		}
	}
	// --- CUDA Part ---
	int deviceCount = 0;
	hipError_t error_id = hipGetDeviceCount(&deviceCount);
	if (error_id != hipSuccess) {
		mexPrintf( "hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id) );
		mexErrMsgTxt("CUDA device not found");
	}
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, 0); //only on device #0
	if (INPUT_W > deviceProp.maxTexture2D[0] || INPUT_H > deviceProp.maxTexture2D[1] ){
		mexPrintf("One of input dimension is greater than CUDA capabilites\n");
		mexPrintf("Max dimension are : (%d,%d)\n",deviceProp.maxTexture2D[0],deviceProp.maxTexture2D[1]);
		mexErrMsgTxt("ERROR !!");
	}
	// Allocate, copy input data into a 2D texture
	hipArray *d_input;
	hipChannelFormatDesc input_tex = hipCreateChannelDesc<float>();
	hipMallocArray(&d_input, &input_tex, INPUT_W, INPUT_H);
	hipMemcpyToArray(d_input, 0, 0, f_input, INPUT_SIZE, hipMemcpyHostToDevice);
	// nearest -> point sampling; every other option uses hardware linear
	// filtering (cubicTex2D builds on linear fetches).
	texInput.filterMode = (interpo_option == 0 ) ? hipFilterModePoint : hipFilterModeLinear;
	texInput.normalized = 0;
	hipBindTextureToArray(texInput, d_input);
	// Allocate, copy points data into a float2*
	float2 *d_points;
	hipMalloc((void **)&d_points, POINTS_SIZE);
	hipMemcpy(d_points, f_points, POINTS_SIZE, hipMemcpyHostToDevice);
	// Allocate output space
	float *d_output;
	hipMalloc((void **)&d_output, OUTPUT_SIZE);
	// Set up blocks, grid for parallel processing
	dim3 dimBlock(16, 12);
	dim3 dimGrid(iDivUp(OUTPUT_W,dimBlock.x),iDivUp(OUTPUT_H,dimBlock.y));
	// Run it
	hipLaunchKernelGGL(( interpTex), dim3(dimGrid), dim3(dimBlock), 0, 0, d_points, d_output, OUTPUT_W*OUTPUT_H, OUTPUT_W, interpo_option);
	// Copy the data back (synchronous copy also waits for the kernel)
	hipMemcpy(f_output, d_output, OUTPUT_SIZE, hipMemcpyDeviceToHost);
	// Convert to double, and we're done
	for ( int r=0; r<OUTPUT_H; r++ ) {
		for ( int c=0; c<OUTPUT_W; c++ ) {
			output[r+OUTPUT_H*c] = (double)f_output[r+OUTPUT_H*c];
		}
	}
	// Release device and host resources
	hipUnbindTexture(texInput);
	hipFreeArray(d_input);
	hipFree(d_points);
	hipFree(d_output);
	mxFree(f_input);
	mxFree(f_output);
	mxFree(f_points);
}
| 6aa8771ee0896fe727dbf10dd6ef0da0f1959163.cu | #include "cufft.h"
#include "cutil.h"
#include "mex.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <memcpy.cu>
#include <cubicPrefilter2D.cu>
#include <cubicTex2D.cu>
#define IMUL(a, b) __umul24(a, b)
//////////////////////////////////////////////////////////////////////////////////////
/// Interpolation Kernel
//////////////////////////////////////////////////////////////////////////////////////
texture<float, 2, cudaReadModeElementType> texInput;
// One thread per output sample: fetch texInput at the coordinate given in
// points[loc], using the interpolation method selected by _option
// (0 nearest / 1 linear via the texture filter mode set host-side,
//  2-3 bicubic via cubicTex2D; anything else falls back to tex2D).
__global__ void interpTex( float2 *points, float *output, int output_numel, int output_width, int _option){
	const int x = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
	const int y = IMUL(blockDim.y, blockIdx.y) + threadIdx.y;
	const int loc = IMUL(y,output_width) + x;
	if (loc < output_numel) {
		switch(_option){
			case 0: // nearest: host sets texInput.filterMode to point for this option
				output[loc] = tex2D(texInput, points[loc].x, points[loc].y);
				break;
			case 1: // linear: hardware bilinear filtering
				output[loc] = tex2D(texInput, points[loc].x, points[loc].y);
				break;
			case 2: // spline: bicubic reconstruction
				output[loc] = cubicTex2D(texInput, points[loc].x, points[loc].y);
				break;
			case 3: // 'cubic' is rejected host-side; kept as a cubicTex2D fallback
				output[loc] = cubicTex2D(texInput, points[loc].x, points[loc].y);
				break;
			default :
				output[loc] = tex2D(texInput, points[loc].x, points[loc].y);
				break;
		}
	}
}
//Round a / b to nearest higher integer value
// Ceiling of a/b for integer operands: truncating quotient plus a carry
// whenever a remainder exists.
int iDivUp(int a, int b){
	const int quotient = a / b;
	return (a % b) ? quotient + 1 : quotient;
}
//////////////////////////////////////////////////////////////////////////////////////
/// Main
//////////////////////////////////////////////////////////////////////////////////////
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) {
// Check number of inputs
if (nrhs <3) mexErrMsgTxt("Must have three input arguments: data, rowpoints, colpoints");
if (nlhs !=1) mexErrMsgTxt("Must have one output argument");
if (nrhs > 4) mexErrMsgTxt("This format is not supported, usage : interp2cuda(W,Xi,Yi,method), method is optional ");
// Check the class of input data
if ( mxIsComplex(prhs[0]) || !mxIsClass(prhs[0],"double") ) {
// try casting the input to double?
mexErrMsgTxt("Input must be real, double !");
}
///// Allocate, set up data structures
int OUTPUT_W, OUTPUT_H, OUTPUT_SIZE, INPUT_W, INPUT_H, INPUT_SIZE, POINTS_SIZE ;
int interpo_option;
double *input = mxGetPr(prhs[0]);
INPUT_W = mxGetN(prhs[0]);
INPUT_H = mxGetM(prhs[0]);
INPUT_SIZE = INPUT_H * INPUT_W * sizeof(float);
float *f_input;
float2 *f_points;
///// Check if we're in "input points" or "number of points" mode
if ( mxGetNumberOfElements(prhs[1]) == 1 && mxGetNumberOfElements(prhs[2]) == 1) {
mexErrMsgTxt("This mode is not yet implemented !! usage interp2(Z,Xi,Yi,method)");
// double *row_points = mxGetPr(prhs[1]);
// double *col_points = mxGetPr(prhs[2]);
// // number of points mode
// OUTPUT_W = (int) col_points[0];
// OUTPUT_H = (int) row_points[0];
// OUTPUT_SIZE = OUTPUT_W * OUTPUT_H * sizeof(float);
// POINTS_SIZE = 2*OUTPUT_SIZE;
// // we want N evenly spaced points from 0 to 1
// f_points = (float2 *)mxMalloc(POINTS_SIZE);
// for ( int r=0; r<OUTPUT_H; r++ ) {
// for ( int c=0; c<OUTPUT_W; c++ ) {
// f_points[c + OUTPUT_W*r].x = (float) c * (INPUT_W-1) / (OUTPUT_W-1) + 0.5f;
// f_points[c + OUTPUT_W*r].y = (float) r * (INPUT_H-1) / (OUTPUT_H-1) + 0.5f;
// }
// }
}
else {
double *x_points = mxGetPr(prhs[1]);
double *y_points = mxGetPr(prhs[2]);
int NB_ELEM_Xi,NB_ELEM_Yi;
if (nrhs == 4 || nrhs == 7){ // if interpolation option is passed
mxChar* option = (nrhs == 4) ? mxGetChars(prhs[3]) : mxGetChars(prhs[6]);
if((char)option[0] == 'n' && (char)option[1] == 'e' && (char)option[2] == 'a' && (char)option[3] == 'r' && (char)option[4] == 'e'
&& (char)option[5] == 's' && (char)option[6] == 't'){
interpo_option = 0;
//mexPrintf("nearest\n");
}
else if ((char)option[0] == 'l' && (char)option[1] == 'i' && (char)option[2] == 'n' && (char)option[3] == 'e' && (char)option[4] == 'a'
&& (char)option[5] == 'r'){
interpo_option = 1;
//mexPrintf("linear\n");
}
else if((char)option[0] == 's' && (char)option[1] == 'p' && (char)option[2] == 'l' && (char)option[3] == 'i' && (char)option[4] == 'n'
&& (char)option[5] == 'e'){
interpo_option = 2;
//mexPrintf("spline\n");
}
else if((char)option[0] == 'c' && (char)option[1] == 'u' && (char)option[2] == 'b' && (char)option[3] == 'i' && (char)option[4] == 'c'){
interpo_option = 3;
//mexPrintf("cubic\n");
mexErrMsgTxt("cubic method is not yet supported !!");
}
else{
mexErrMsgTxt("method is not recognized, you must use 'nearest' 'linear' 'spline' or 'cubic'");
}
}
else{
interpo_option = 1;
// mexPrintf("linear\n");
}
NB_ELEM_Xi = mxGetNumberOfElements(prhs[1]);
NB_ELEM_Yi = mxGetNumberOfElements(prhs[2]);
if ( NB_ELEM_Xi != NB_ELEM_Yi){
mexErrMsgTxt("Xi and Yi must have the same size !!");
}
OUTPUT_W = mxGetN(prhs[1]);
OUTPUT_H = mxGetM(prhs[1]);
OUTPUT_SIZE = OUTPUT_W * OUTPUT_H * sizeof(float);
POINTS_SIZE = 2*OUTPUT_SIZE;
f_points = (float2 *)mxMalloc(POINTS_SIZE);
for ( int r=0; r<NB_ELEM_Xi; r++ ) {
f_points[r].x = (float) x_points[r] - 0.5f;
f_points[r].y = (float) y_points[r] - 0.5f;
}
}
plhs[0] = mxCreateDoubleMatrix(OUTPUT_H, OUTPUT_W, mxREAL);
double *output = mxGetPr(plhs[0]);
float *f_output = (float *)mxMalloc(OUTPUT_SIZE);
///// We need to convert the input array from double to float
f_input = (float *)malloc(INPUT_SIZE);
for ( int r=0; r<INPUT_H; r++ ) {
for ( int c=0; c<INPUT_W; c++ ) {
f_input[c + INPUT_W*r] = (float) input[r+ (INPUT_H)*(c)];
}
}
// --- CUDA Part ---
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
mexPrintf( "cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
mexErrMsgTxt("CUDA device not found");
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0); //only on device #0
if (INPUT_W > deviceProp.maxTexture2D[0] || INPUT_H > deviceProp.maxTexture2D[1] ){
mexPrintf("One of input dimension is greater than CUDA capabilites\n");
mexPrintf("Max dimension are : (%d,%d)\n",deviceProp.maxTexture2D[0],deviceProp.maxTexture2D[1]);
mexErrMsgTxt("ERROR !!");
}
// Allocate, copy input data into a 2D texture
cudaArray *d_input;
cudaChannelFormatDesc input_tex = cudaCreateChannelDesc<float>();
cudaMallocArray(&d_input, &input_tex, INPUT_W, INPUT_H);
cudaMemcpyToArray(d_input, 0, 0, f_input, INPUT_SIZE, cudaMemcpyHostToDevice);
texInput.filterMode = (interpo_option == 0 ) ? cudaFilterModePoint : cudaFilterModeLinear;
texInput.normalized = 0;
cudaBindTextureToArray(texInput, d_input);
// Allocate, copy points data into a float2*
float2 *d_points;
cudaMalloc((void **)&d_points, POINTS_SIZE);
cudaMemcpy(d_points, f_points, POINTS_SIZE, cudaMemcpyHostToDevice);
// Allocate output space
float *d_output;
cudaMalloc((void **)&d_output, OUTPUT_SIZE);
// Set up blocks, grid for parallel processing
dim3 dimBlock(16, 12);
dim3 dimGrid(iDivUp(OUTPUT_W,dimBlock.x),iDivUp(OUTPUT_H,dimBlock.y));
// Run it
interpTex<<<dimGrid, dimBlock>>> (d_points, d_output, OUTPUT_W*OUTPUT_H, OUTPUT_W, interpo_option);
// Copy the data back
cudaMemcpy(f_output, d_output, OUTPUT_SIZE, cudaMemcpyDeviceToHost);
// Convert to double, and we're done
for ( int r=0; r<OUTPUT_H; r++ ) {
for ( int c=0; c<OUTPUT_W; c++ ) {
output[r+OUTPUT_H*c] = (double)f_output[r+OUTPUT_H*c];
}
}
cudaUnbindTexture(texInput);
cudaFreeArray(d_input);
cudaFree(d_points);
cudaFree(d_output);
mxFree(f_input);
mxFree(f_output);
mxFree(f_points);
}
|
42c045a869ce7981da1a142b4315ba4e26b72af2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include "amir_cuda_util/cuda_util.h"
#include "delta2bbox.h"
namespace amirstan {
namespace plugin {
using namespace amirstan::cuda;
struct SMeanStd {
float mean[4];
float std[4];
};
// Logistic sigmoid: 1 / (1 + e^(-z)), evaluated in double then narrowed to
// scalar_t at the return. Saturates to 0/1 for large-magnitude z.
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
  return 1.0 / (1.0 + exp(-z));
}
// Softmax probability of `class_id` over `num_classes` logits laid out with
// stride `step` starting at `data_start`:
//     exp(x[class_id]) / sum_i exp(x[i])
// The running maximum logit is subtracted before exponentiation so large
// logits cannot overflow exp(); the result is mathematically unchanged.
// Assumes num_classes >= 1 (the original read the same range).
template <typename scalar_t>
__device__ __forceinline__ scalar_t softmax_custom(const scalar_t *data_start,
                                                   int step, int class_id,
                                                   int num_classes) {
  // Largest logit, for numerical stability of exp().
  scalar_t max_val = *data_start;
#pragma unroll
  for (int i = 1; i < num_classes; ++i) {
    const scalar_t v = *(data_start + step * i);
    if (v > max_val) max_val = v;
  }
  const scalar_t up_val = exp(*(data_start + step * class_id) - max_val);
  scalar_t down_val = 0;
#pragma unroll
  for (int i = 0; i < num_classes; ++i) {
    down_val += exp(*(data_start + step * i) - max_val);
  }
  return up_val / down_val;
}
// Decode per-anchor bbox regression deltas into absolute boxes and write
// per-class scores. One thread handles one (batch, bbox, ratio, class) cell.
// Layouts (per batch) inferred from the index arithmetic below:
//   in_cls  : [num_ratios, num_classes, num_bbox]
//   in_bbox : [num_ratios, 4, num_bbox]        (dx, dy, dw, dh planes)
//   anchor  : [num_bbox, num_ratios, 4]        (x1, y1, x2, y2)
//   out_cls : [num_outbbox, num_ratios, num_classes]
//   out_bbox: [num_outbbox, 4]
// `mean_std` de-normalizes the raw deltas; `clip_range` = (h, w) clamps the
// decoded boxes into the image, or pass nullptr to skip clipping.
// NOTE: parameter name `use_segmoid_cls` ("sigmoid") is kept as-is to
// preserve the external interface.
template <typename T>
__global__ void delta2bbox_kernel(T *out_cls, T *out_bbox, const T *in_cls,
                                  const T *in_bbox, const T *anchor,
                                  const int *clip_range, int batch_size,
                                  int num_bbox, int num_outbbox,
                                  int num_classes, int num_ratios,
                                  bool use_segmoid_cls, SMeanStd mean_std) {
  // Max |log(w or h scale)| allowed before exp(); 16/1000 matches
  // mmdetection's default wh_ratio_clip.
  const T max_ratio = abs(logf(16. / 1000.));
  const int out_batch_stride = num_outbbox * num_classes;
  const int out_bbox_stride = num_classes * num_ratios;
  const int in_batch_stride = num_bbox * num_classes * num_ratios;
  const int in_ratio_stride = num_bbox * num_classes;
  const int in_class_stride = num_bbox;
  CUDA_KERNEL_LOOP(i, batch_size * num_outbbox * num_classes) {
    // Decompose the flat index into (batch, bbox, ratio, class).
    int tmp_i = i;
    const int batch_id = tmp_i / out_batch_stride;
    tmp_i %= out_batch_stride;
    const int bbox_id = tmp_i / out_bbox_stride;
    if (bbox_id >= num_bbox) {
      continue;  // padding region of the (larger) output buffer
    }
    tmp_i %= out_bbox_stride;
    const int ratio_id = tmp_i / num_classes;
    if (ratio_id >= num_ratios) {
      continue;
    }
    const int class_id = tmp_i % num_classes;
    // ---- classification score ----
    const int out_cls_id = batch_id * out_batch_stride +
                           bbox_id * out_bbox_stride + ratio_id * num_classes +
                           class_id;
    const int in_cls_id = batch_id * in_batch_stride +
                          ratio_id * in_ratio_stride +
                          class_id * in_class_stride;
    if (use_segmoid_cls) {
      out_cls[out_cls_id] = sigmoid<T>(in_cls[in_cls_id + bbox_id]);
    } else {
      // Softmax across the class axis; elements are num_bbox apart.
      out_cls[out_cls_id] =
          softmax_custom<T>(in_cls + batch_id * in_batch_stride +
                                ratio_id * in_ratio_stride + bbox_id,
                            num_bbox, class_id, num_classes);
    }
    // ---- bbox decode: only one thread (class 0) per box does it ----
    if (class_id != 0) {
      continue;
    }
    const int out_bbox_id =
        (batch_id * num_outbbox + bbox_id * num_ratios + ratio_id) * 4;
    const int in_delta_id =
        batch_id * num_bbox * num_ratios + ratio_id * num_bbox;
    // De-normalize the four deltas with the coder's mean/std.
    const T dx =
        in_bbox[in_delta_id * 4 + bbox_id] * mean_std.std[0] + mean_std.mean[0];
    const T dy =
        in_bbox[in_delta_id * 4 + num_bbox + bbox_id] * mean_std.std[1] +
        mean_std.mean[1];
    const T dw =
        in_bbox[in_delta_id * 4 + num_bbox * 2 + bbox_id] * mean_std.std[2] +
        mean_std.mean[2];
    const T dh =
        in_bbox[in_delta_id * 4 + num_bbox * 3 + bbox_id] * mean_std.std[3] +
        mean_std.mean[3];
    const T clamp_dw = max(-max_ratio, min(max_ratio, dw));
    const T clamp_dh = max(-max_ratio, min(max_ratio, dh));
    // Anchor center/size.
    const int anchor_start = (bbox_id * num_ratios + ratio_id) * 4;
    const T px = (anchor[anchor_start] + anchor[anchor_start + 2]) * 0.5;
    const T py = (anchor[anchor_start + 1] + anchor[anchor_start + 3]) * 0.5;
    const T pw = anchor[anchor_start + 2] - anchor[anchor_start];
    const T ph = anchor[anchor_start + 3] - anchor[anchor_start + 1];
    // BUG FIX: use the clamped deltas for the size terms. Previously
    // clamp_dw/clamp_dh were computed and ignored, so extreme dw/dh could
    // explode exp() instead of being bounded by max_ratio.
    const T gw = pw * exp(clamp_dw);
    const T gh = ph * exp(clamp_dh);
    const T gx = px + pw * dx;
    const T gy = py + ph * dy;
    const T x1 = gx - gw * 0.5;
    const T y1 = gy - gh * 0.5;
    const T x2 = gx + gw * 0.5;
    const T y2 = gy + gh * 0.5;
    if (clip_range != nullptr) {
      // clip_range = (height, width): clamp corners into the image.
      out_bbox[out_bbox_id] = max(T(0.), min(x1, T(clip_range[1] - 1)));
      out_bbox[out_bbox_id + 1] = max(T(0.), min(y1, T(clip_range[0] - 1)));
      out_bbox[out_bbox_id + 2] = max(T(0.), min(x2, T(clip_range[1] - 1)));
      out_bbox[out_bbox_id + 3] = max(T(0.), min(y2, T(clip_range[0] - 1)));
    } else {
      out_bbox[out_bbox_id] = x1;
      out_bbox[out_bbox_id + 1] = y1;
      out_bbox[out_bbox_id + 2] = x2;
      out_bbox[out_bbox_id + 3] = y2;
    }
  }
}
// Host launcher for delta2bbox_kernel (HIP build): packs the 4-element delta
// mean/std arrays into an SMeanStd POD passed to the kernel by value, then
// launches one thread per (batch, out-bbox, class) cell on `stream`.
// Asynchronous: no synchronization or error check is performed here.
// NOTE(review): `mean` and `std` must each point to at least 4 floats.
template <typename T>
void delta2bbox(T *out_cls, T *out_bbox, const T *in_cls, const T *in_bbox,
                const T *anchor, const int *clip_range, int batch_size,
                int num_bbox, int num_outbbox, int num_classes, int num_ratios,
                bool use_segmoid_cls, float *mean, float *std,
                hipStream_t stream) {
  SMeanStd mean_std;
  memcpy(&mean_std.mean[0], mean, sizeof(float) * 4);
  memcpy(&mean_std.std[0], std, sizeof(float) * 4);
  // One work item per output (batch, bbox, class) triple.
  const size_t input_size = batch_size * num_outbbox * num_classes;
  hipLaunchKernelGGL(( delta2bbox_kernel<T>), dim3(GET_BLOCKS(input_size)), dim3(CUDA_NUM_THREADS), 0, stream,
      out_cls, out_bbox, in_cls, in_bbox, anchor, clip_range, batch_size,
      num_bbox, num_outbbox, num_classes, num_ratios, use_segmoid_cls,
      mean_std);
}
template void delta2bbox<float>(float *out_cls, float *out_bbox,
const float *in_cls, const float *in_bbox,
const float *anchor, const int *clip_range,
int batch_size, int num_bbox, int num_outbbox,
int num_classes, int num_ratios,
bool use_segmoid_cls, float *mean, float *std,
hipStream_t stream);
} // namespace plugin
} // namespace amirstan
| 42c045a869ce7981da1a142b4315ba4e26b72af2.cu | #include <stdio.h>
#include <algorithm>
#include <cmath>
#include "amir_cuda_util/cuda_util.h"
#include "delta2bbox.h"
namespace amirstan {
namespace plugin {
using namespace amirstan::cuda;
struct SMeanStd {
float mean[4];
float std[4];
};
// Logistic sigmoid: 1 / (1 + e^(-z)), evaluated in double then narrowed to
// scalar_t at the return. Saturates to 0/1 for large-magnitude z.
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
  return 1.0 / (1.0 + exp(-z));
}
// Softmax probability of `class_id` over `num_classes` logits laid out with
// stride `step` starting at `data_start`:
//     exp(x[class_id]) / sum_i exp(x[i])
// The running maximum logit is subtracted before exponentiation so large
// logits cannot overflow exp(); the result is mathematically unchanged.
// Assumes num_classes >= 1 (the original read the same range).
template <typename scalar_t>
__device__ __forceinline__ scalar_t softmax_custom(const scalar_t *data_start,
                                                   int step, int class_id,
                                                   int num_classes) {
  // Largest logit, for numerical stability of exp().
  scalar_t max_val = *data_start;
#pragma unroll
  for (int i = 1; i < num_classes; ++i) {
    const scalar_t v = *(data_start + step * i);
    if (v > max_val) max_val = v;
  }
  const scalar_t up_val = exp(*(data_start + step * class_id) - max_val);
  scalar_t down_val = 0;
#pragma unroll
  for (int i = 0; i < num_classes; ++i) {
    down_val += exp(*(data_start + step * i) - max_val);
  }
  return up_val / down_val;
}
// Decode per-anchor bbox regression deltas into absolute boxes and write
// per-class scores. One thread handles one (batch, bbox, ratio, class) cell.
// Layouts (per batch) inferred from the index arithmetic below:
//   in_cls  : [num_ratios, num_classes, num_bbox]
//   in_bbox : [num_ratios, 4, num_bbox]        (dx, dy, dw, dh planes)
//   anchor  : [num_bbox, num_ratios, 4]        (x1, y1, x2, y2)
//   out_cls : [num_outbbox, num_ratios, num_classes]
//   out_bbox: [num_outbbox, 4]
// `mean_std` de-normalizes the raw deltas; `clip_range` = (h, w) clamps the
// decoded boxes into the image, or pass nullptr to skip clipping.
// NOTE: parameter name `use_segmoid_cls` ("sigmoid") is kept as-is to
// preserve the external interface.
template <typename T>
__global__ void delta2bbox_kernel(T *out_cls, T *out_bbox, const T *in_cls,
                                  const T *in_bbox, const T *anchor,
                                  const int *clip_range, int batch_size,
                                  int num_bbox, int num_outbbox,
                                  int num_classes, int num_ratios,
                                  bool use_segmoid_cls, SMeanStd mean_std) {
  // Max |log(w or h scale)| allowed before exp(); 16/1000 matches
  // mmdetection's default wh_ratio_clip.
  const T max_ratio = abs(logf(16. / 1000.));
  const int out_batch_stride = num_outbbox * num_classes;
  const int out_bbox_stride = num_classes * num_ratios;
  const int in_batch_stride = num_bbox * num_classes * num_ratios;
  const int in_ratio_stride = num_bbox * num_classes;
  const int in_class_stride = num_bbox;
  CUDA_KERNEL_LOOP(i, batch_size * num_outbbox * num_classes) {
    // Decompose the flat index into (batch, bbox, ratio, class).
    int tmp_i = i;
    const int batch_id = tmp_i / out_batch_stride;
    tmp_i %= out_batch_stride;
    const int bbox_id = tmp_i / out_bbox_stride;
    if (bbox_id >= num_bbox) {
      continue;  // padding region of the (larger) output buffer
    }
    tmp_i %= out_bbox_stride;
    const int ratio_id = tmp_i / num_classes;
    if (ratio_id >= num_ratios) {
      continue;
    }
    const int class_id = tmp_i % num_classes;
    // ---- classification score ----
    const int out_cls_id = batch_id * out_batch_stride +
                           bbox_id * out_bbox_stride + ratio_id * num_classes +
                           class_id;
    const int in_cls_id = batch_id * in_batch_stride +
                          ratio_id * in_ratio_stride +
                          class_id * in_class_stride;
    if (use_segmoid_cls) {
      out_cls[out_cls_id] = sigmoid<T>(in_cls[in_cls_id + bbox_id]);
    } else {
      // Softmax across the class axis; elements are num_bbox apart.
      out_cls[out_cls_id] =
          softmax_custom<T>(in_cls + batch_id * in_batch_stride +
                                ratio_id * in_ratio_stride + bbox_id,
                            num_bbox, class_id, num_classes);
    }
    // ---- bbox decode: only one thread (class 0) per box does it ----
    if (class_id != 0) {
      continue;
    }
    const int out_bbox_id =
        (batch_id * num_outbbox + bbox_id * num_ratios + ratio_id) * 4;
    const int in_delta_id =
        batch_id * num_bbox * num_ratios + ratio_id * num_bbox;
    // De-normalize the four deltas with the coder's mean/std.
    const T dx =
        in_bbox[in_delta_id * 4 + bbox_id] * mean_std.std[0] + mean_std.mean[0];
    const T dy =
        in_bbox[in_delta_id * 4 + num_bbox + bbox_id] * mean_std.std[1] +
        mean_std.mean[1];
    const T dw =
        in_bbox[in_delta_id * 4 + num_bbox * 2 + bbox_id] * mean_std.std[2] +
        mean_std.mean[2];
    const T dh =
        in_bbox[in_delta_id * 4 + num_bbox * 3 + bbox_id] * mean_std.std[3] +
        mean_std.mean[3];
    const T clamp_dw = max(-max_ratio, min(max_ratio, dw));
    const T clamp_dh = max(-max_ratio, min(max_ratio, dh));
    // Anchor center/size.
    const int anchor_start = (bbox_id * num_ratios + ratio_id) * 4;
    const T px = (anchor[anchor_start] + anchor[anchor_start + 2]) * 0.5;
    const T py = (anchor[anchor_start + 1] + anchor[anchor_start + 3]) * 0.5;
    const T pw = anchor[anchor_start + 2] - anchor[anchor_start];
    const T ph = anchor[anchor_start + 3] - anchor[anchor_start + 1];
    // BUG FIX: use the clamped deltas for the size terms. Previously
    // clamp_dw/clamp_dh were computed and ignored, so extreme dw/dh could
    // explode exp() instead of being bounded by max_ratio.
    const T gw = pw * exp(clamp_dw);
    const T gh = ph * exp(clamp_dh);
    const T gx = px + pw * dx;
    const T gy = py + ph * dy;
    const T x1 = gx - gw * 0.5;
    const T y1 = gy - gh * 0.5;
    const T x2 = gx + gw * 0.5;
    const T y2 = gy + gh * 0.5;
    if (clip_range != nullptr) {
      // clip_range = (height, width): clamp corners into the image.
      out_bbox[out_bbox_id] = max(T(0.), min(x1, T(clip_range[1] - 1)));
      out_bbox[out_bbox_id + 1] = max(T(0.), min(y1, T(clip_range[0] - 1)));
      out_bbox[out_bbox_id + 2] = max(T(0.), min(x2, T(clip_range[1] - 1)));
      out_bbox[out_bbox_id + 3] = max(T(0.), min(y2, T(clip_range[0] - 1)));
    } else {
      out_bbox[out_bbox_id] = x1;
      out_bbox[out_bbox_id + 1] = y1;
      out_bbox[out_bbox_id + 2] = x2;
      out_bbox[out_bbox_id + 3] = y2;
    }
  }
}
// Host launcher for delta2bbox_kernel (CUDA build): packs the 4-element delta
// mean/std arrays into an SMeanStd POD passed to the kernel by value, then
// launches one thread per (batch, out-bbox, class) cell on `stream`.
// Asynchronous: no synchronization or error check is performed here.
// NOTE(review): `mean` and `std` must each point to at least 4 floats.
template <typename T>
void delta2bbox(T *out_cls, T *out_bbox, const T *in_cls, const T *in_bbox,
                const T *anchor, const int *clip_range, int batch_size,
                int num_bbox, int num_outbbox, int num_classes, int num_ratios,
                bool use_segmoid_cls, float *mean, float *std,
                cudaStream_t stream) {
  SMeanStd mean_std;
  memcpy(&mean_std.mean[0], mean, sizeof(float) * 4);
  memcpy(&mean_std.std[0], std, sizeof(float) * 4);
  // One work item per output (batch, bbox, class) triple.
  const size_t input_size = batch_size * num_outbbox * num_classes;
  delta2bbox_kernel<T><<<GET_BLOCKS(input_size), CUDA_NUM_THREADS, 0, stream>>>(
      out_cls, out_bbox, in_cls, in_bbox, anchor, clip_range, batch_size,
      num_bbox, num_outbbox, num_classes, num_ratios, use_segmoid_cls,
      mean_std);
}
template void delta2bbox<float>(float *out_cls, float *out_bbox,
const float *in_cls, const float *in_bbox,
const float *anchor, const int *clip_range,
int batch_size, int num_bbox, int num_outbbox,
int num_classes, int num_ratios,
bool use_segmoid_cls, float *mean, float *std,
cudaStream_t stream);
} // namespace plugin
} // namespace amirstan
|
8bdf69db60de3163e207907e845596878bfaaf59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Compute the nonbonded parameters for particles and exceptions.
*/
// Grid-stride kernel: expands the base per-particle (and optionally
// per-exception) nonbonded parameters, applying any global-parameter offsets,
// and accumulates the Ewald/LJPME self-energy corrections into energyBuffer.
// params = (charge, sigma, epsilon, _) — inferred from how .x/.y/.z are
// consumed below (x -> charge store, y/z -> sigmaEpsilon); TODO confirm.
// Compile-time switches: HAS_OFFSETS, USE_POSQ_CHARGES, HAS_EXCEPTIONS,
// INCLUDE_EWALD, INCLUDE_LJPME.
extern "C" __global__ void computeParameters(mixed* __restrict__ energyBuffer, bool includeSelfEnergy, real* __restrict__ globalParams,
        int numAtoms, const float4* __restrict__ baseParticleParams, real4* __restrict__ posq, real* __restrict__ charge,
        float2* __restrict__ sigmaEpsilon, float4* __restrict__ particleParamOffsets, int* __restrict__ particleOffsetIndices
#ifdef HAS_EXCEPTIONS
        , int numExceptions, const float4* __restrict__ baseExceptionParams, float4* __restrict__ exceptionParams,
        float4* __restrict__ exceptionParamOffsets, int* __restrict__ exceptionOffsetIndices
#endif
    ) {
    mixed energy = 0;
    // Compute particle parameters.
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numAtoms; i += blockDim.x*gridDim.x) {
        float4 params = baseParticleParams[i];
#ifdef HAS_OFFSETS
        // Apply the context-parameter offsets listed for this particle
        // (CSR-style: particleOffsetIndices gives the [start, end) slice).
        int start = particleOffsetIndices[i], end = particleOffsetIndices[i+1];
        for (int j = start; j < end; j++) {
            float4 offset = particleParamOffsets[j];
            real value = globalParams[(int) offset.w];  // offset.w indexes the global parameter
            params.x += value*offset.x;
            params.y += value*offset.y;
            params.z += value*offset.z;
        }
#endif
#ifdef USE_POSQ_CHARGES
        posq[i].w = params.x;
#else
        charge[i] = params.x;
#endif
        // Pre-scale sigma/epsilon so pairwise terms can combine cheaply
        // (presumably via addition/multiplication in the pair kernels —
        // TODO confirm against the nonbonded pair code).
        sigmaEpsilon[i] = make_float2(0.5f*params.y, 2*SQRT(params.z));
#ifdef HAS_OFFSETS
        // Self-energy terms depend on the (possibly offset) parameters, so
        // they are only re-accumulated when offsets are in play.
#ifdef INCLUDE_EWALD
        energy -= EWALD_SELF_ENERGY_SCALE*params.x*params.x;
#endif
#ifdef INCLUDE_LJPME
        real sig3 = params.y*params.y*params.y;
        energy += LJPME_SELF_ENERGY_SCALE*sig3*sig3*params.z;
#endif
#endif
    }
    // Compute exception parameters.
#ifdef HAS_EXCEPTIONS
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numExceptions; i += blockDim.x*gridDim.x) {
        float4 params = baseExceptionParams[i];
#ifdef HAS_OFFSETS
        int start = exceptionOffsetIndices[i], end = exceptionOffsetIndices[i+1];
        for (int j = start; j < end; j++) {
            float4 offset = exceptionParamOffsets[j];
            real value = globalParams[(int) offset.w];
            params.x += value*offset.x;
            params.y += value*offset.y;
            params.z += value*offset.z;
        }
#endif
        // 138.935456 is presumably ONE_4PI_EPS0 (Coulomb constant in
        // kJ*nm/mol/e^2) baked into the charge product — TODO confirm.
        exceptionParams[i] = make_float4((float) (138.935456f*params.x), (float) params.y, (float) (4*params.z), 0);
    }
#endif
    if (includeSelfEnergy)
        energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Compute parameters for subtracting the reciprocal part of excluded interactions.
*/
// Grid-stride kernel: builds the per-exclusion parameters used to subtract
// the reciprocal-space contribution of excluded pairs.
// Charge product is scaled by 138.935456 (presumably ONE_4PI_EPS0 — TODO
// confirm); sigma/epsilon terms are only nonzero under INCLUDE_LJPME and are
// products of the pre-scaled sigmaEpsilon entries.
extern "C" __global__ void computeExclusionParameters(real4* __restrict__ posq, real* __restrict__ charge, float2* __restrict__ sigmaEpsilon,
        int numExclusions, const int2* __restrict__ exclusionAtoms, float4* __restrict__ exclusionParams) {
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numExclusions; i += blockDim.x*gridDim.x) {
        int2 atoms = exclusionAtoms[i];  // pair of atom indices for this exclusion
#ifdef USE_POSQ_CHARGES
        real chargeProd = posq[atoms.x].w*posq[atoms.y].w;
#else
        real chargeProd = charge[atoms.x]*charge[atoms.y];
#endif
#ifdef INCLUDE_LJPME
        // sigmaEpsilon holds (0.5*sigma, 2*sqrt(epsilon)) per particle, so
        // these products carry those scale factors into the pair terms.
        float2 sigEps1 = sigmaEpsilon[atoms.x];
        float2 sigEps2 = sigmaEpsilon[atoms.y];
        float sigma = sigEps1.x*sigEps2.x;
        float epsilon = sigEps1.y*sigEps2.y;
#else
        float sigma = 0;
        float epsilon = 0;
#endif
        exclusionParams[i] = make_float4((float) (138.935456f*chargeProd), sigma, epsilon, 0);
    }
} | 8bdf69db60de3163e207907e845596878bfaaf59.cu | /**
* Compute the nonbonded parameters for particles and exceptions.
*/
// Grid-stride kernel: expands the base per-particle (and optionally
// per-exception) nonbonded parameters, applying any global-parameter offsets,
// and accumulates the Ewald/LJPME self-energy corrections into energyBuffer.
// params = (charge, sigma, epsilon, _) — inferred from how .x/.y/.z are
// consumed below (x -> charge store, y/z -> sigmaEpsilon); TODO confirm.
// Compile-time switches: HAS_OFFSETS, USE_POSQ_CHARGES, HAS_EXCEPTIONS,
// INCLUDE_EWALD, INCLUDE_LJPME.
extern "C" __global__ void computeParameters(mixed* __restrict__ energyBuffer, bool includeSelfEnergy, real* __restrict__ globalParams,
        int numAtoms, const float4* __restrict__ baseParticleParams, real4* __restrict__ posq, real* __restrict__ charge,
        float2* __restrict__ sigmaEpsilon, float4* __restrict__ particleParamOffsets, int* __restrict__ particleOffsetIndices
#ifdef HAS_EXCEPTIONS
        , int numExceptions, const float4* __restrict__ baseExceptionParams, float4* __restrict__ exceptionParams,
        float4* __restrict__ exceptionParamOffsets, int* __restrict__ exceptionOffsetIndices
#endif
    ) {
    mixed energy = 0;
    // Compute particle parameters.
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numAtoms; i += blockDim.x*gridDim.x) {
        float4 params = baseParticleParams[i];
#ifdef HAS_OFFSETS
        // Apply the context-parameter offsets listed for this particle
        // (CSR-style: particleOffsetIndices gives the [start, end) slice).
        int start = particleOffsetIndices[i], end = particleOffsetIndices[i+1];
        for (int j = start; j < end; j++) {
            float4 offset = particleParamOffsets[j];
            real value = globalParams[(int) offset.w];  // offset.w indexes the global parameter
            params.x += value*offset.x;
            params.y += value*offset.y;
            params.z += value*offset.z;
        }
#endif
#ifdef USE_POSQ_CHARGES
        posq[i].w = params.x;
#else
        charge[i] = params.x;
#endif
        // Pre-scale sigma/epsilon so pairwise terms can combine cheaply
        // (presumably via addition/multiplication in the pair kernels —
        // TODO confirm against the nonbonded pair code).
        sigmaEpsilon[i] = make_float2(0.5f*params.y, 2*SQRT(params.z));
#ifdef HAS_OFFSETS
        // Self-energy terms depend on the (possibly offset) parameters, so
        // they are only re-accumulated when offsets are in play.
#ifdef INCLUDE_EWALD
        energy -= EWALD_SELF_ENERGY_SCALE*params.x*params.x;
#endif
#ifdef INCLUDE_LJPME
        real sig3 = params.y*params.y*params.y;
        energy += LJPME_SELF_ENERGY_SCALE*sig3*sig3*params.z;
#endif
#endif
    }
    // Compute exception parameters.
#ifdef HAS_EXCEPTIONS
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numExceptions; i += blockDim.x*gridDim.x) {
        float4 params = baseExceptionParams[i];
#ifdef HAS_OFFSETS
        int start = exceptionOffsetIndices[i], end = exceptionOffsetIndices[i+1];
        for (int j = start; j < end; j++) {
            float4 offset = exceptionParamOffsets[j];
            real value = globalParams[(int) offset.w];
            params.x += value*offset.x;
            params.y += value*offset.y;
            params.z += value*offset.z;
        }
#endif
        // 138.935456 is presumably ONE_4PI_EPS0 (Coulomb constant in
        // kJ*nm/mol/e^2) baked into the charge product — TODO confirm.
        exceptionParams[i] = make_float4((float) (138.935456f*params.x), (float) params.y, (float) (4*params.z), 0);
    }
#endif
    if (includeSelfEnergy)
        energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Compute parameters for subtracting the reciprocal part of excluded interactions.
*/
// Grid-stride kernel: builds the per-exclusion parameters used to subtract
// the reciprocal-space contribution of excluded pairs.
// Charge product is scaled by 138.935456 (presumably ONE_4PI_EPS0 — TODO
// confirm); sigma/epsilon terms are only nonzero under INCLUDE_LJPME and are
// products of the pre-scaled sigmaEpsilon entries.
extern "C" __global__ void computeExclusionParameters(real4* __restrict__ posq, real* __restrict__ charge, float2* __restrict__ sigmaEpsilon,
        int numExclusions, const int2* __restrict__ exclusionAtoms, float4* __restrict__ exclusionParams) {
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numExclusions; i += blockDim.x*gridDim.x) {
        int2 atoms = exclusionAtoms[i];  // pair of atom indices for this exclusion
#ifdef USE_POSQ_CHARGES
        real chargeProd = posq[atoms.x].w*posq[atoms.y].w;
#else
        real chargeProd = charge[atoms.x]*charge[atoms.y];
#endif
#ifdef INCLUDE_LJPME
        // sigmaEpsilon holds (0.5*sigma, 2*sqrt(epsilon)) per particle, so
        // these products carry those scale factors into the pair terms.
        float2 sigEps1 = sigmaEpsilon[atoms.x];
        float2 sigEps2 = sigmaEpsilon[atoms.y];
        float sigma = sigEps1.x*sigEps2.x;
        float epsilon = sigEps1.y*sigEps2.y;
#else
        float sigma = 0;
        float epsilon = 0;
#endif
        exclusionParams[i] = make_float4((float) (138.935456f*chargeProd), sigma, epsilon, 0);
    }
} |
c95b270ba124bc5eed6764e167f533bd14aacb53.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "biasd_cuda_integrate.h"
#include "biasd_cuda.h"
// ##########################################################
// ##########################################################
// One thread per datum: evaluates the BIASD log-likelihood of observation
// d[idx] given the two-state model (ep1/ep2 means, sigma1/sigma2 noise,
// k1/k2 rates, tau measurement period) and stores it in ll[idx].
// `epsilon` is the tolerance forwarded to the adaptive blurring integral.
__global__ void kernel_loglikelihood(int N, double * d, double ep1, double ep2, double sigma1, double sigma2, double k1, double k2, double tau, double epsilon, double * ll) {
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx < N) {  // bounds guard: grid may overshoot N
		double out, intval[2] = {0.,0.};
		theta t = {.ep1 = ep1, .ep2 = ep2, .sig1 = sigma1, .sig2 = sigma2, .k1 = k1, .k2 = k2, .tau = tau};
		// Calculate the state contributions
		out = k2/(k1+k2) / sigma1 * exp(-k1*tau - .5 * pow((d[idx]-ep1)/sigma1,2.)); // state 1
		out += k1/(k1+k2) / sigma2 * exp(-k2*tau - .5 * pow((d[idx]-ep2)/sigma2,2.)); // state 2
		// Perform the blurring integral
		adaptive_integrate(0.,1.,intval,epsilon,d[idx],&t);
		out += 2.*k1*k2/(k1+k2)*tau *intval[0]; // the blurring contribution
		out = log(out) - .5 * log(2.* M_PI); // Add prefactor
		ll[idx] = out; // transfer out result
	}
}
// Host entry point (HIP build): validates the model parameters, copies the N
// observations to `device`, launches kernel_loglikelihood, and copies the N
// per-datum log-likelihoods back into `ll`. Invalid parameters yield
// ll[i] = -INFINITY for all i instead of an error.
// The blocking hipMemcpy back to `ll` also synchronizes with the kernel.
// NOTE(review): hipSetDevice/hipMalloc/hipMemcpy return codes are ignored;
// an allocation failure would silently produce garbage — consider checking.
void log_likelihood(int device, int N, double * d, double ep1, double ep2, double sigma1, double sigma2, double k1, double k2, double tau, double epsilon, double * ll) {
	// Sanity checks from the model
	if ((ep1 < ep2) && (sigma1 > 0.) && (sigma2 > 0.) && (k1 > 0.) && (k2 > 0.) && (tau > 0.) && (epsilon > 0.)) {
		// Initialize CUDA things
		//get_cuda_errors();
		hipSetDevice(device);
		//hipDeviceProp_t deviceProp;
		//hipGetDeviceProperties(&deviceProp, device);
		int threads = 256;//deviceProp.maxThreadsPerBlock/8;
		int blocks = (N+threads-1)/threads;  // ceil-div so all N data are covered
		double * d_d;
		double * ll_d;
		hipMalloc((void**)&d_d,N*sizeof(double));
		hipMalloc((void**)&ll_d,N*sizeof(double));
		hipMemcpy(d_d,d,N*sizeof(double),hipMemcpyHostToDevice);
		// Evaluate integrand at f -> store in y.
		hipLaunchKernelGGL(( kernel_loglikelihood), dim3(blocks),dim3(threads), 0, 0, N,d_d,ep1,ep2,sigma1,sigma2,k1,k2,tau,epsilon,ll_d);
		hipMemcpy(ll,ll_d,N*sizeof(double),hipMemcpyDeviceToHost);
		hipFree(d_d);
		hipFree(ll_d);
		//get_cuda_errors();
	} else {
		int i;
		for (i=0;i<N;i++){ ll[i] = -INFINITY;}
	}
}
// Total BIASD log-likelihood: evaluates the per-datum log-likelihoods into a
// host scratch buffer via log_likelihood(), sums them, frees the buffer, and
// returns the total.
double sum_log_likelihood(int device, int N, double *d, double ep1, double ep2, double sigma1, double sigma2, double k1, double k2, double tau, double epsilon) {
	double *ll = (double *) malloc(N*sizeof(double));
	log_likelihood(device, N, d, ep1, ep2, sigma1, sigma2, k1, k2, tau, epsilon, ll);
	double total = 0.;
	for (int i = 0; i < N; i++) {
		total += ll[i];
	}
	free(ll);
	return total;
}
| c95b270ba124bc5eed6764e167f533bd14aacb53.cu | #include <stdlib.h>
#include <cuda.h>
#include "biasd_cuda_integrate.h"
#include "biasd_cuda.h"
// ##########################################################
// ##########################################################
// One thread per datum: evaluates the BIASD log-likelihood of observation
// d[idx] given the two-state model (ep1/ep2 means, sigma1/sigma2 noise,
// k1/k2 rates, tau measurement period) and stores it in ll[idx].
// `epsilon` is the tolerance forwarded to the adaptive blurring integral.
__global__ void kernel_loglikelihood(int N, double * d, double ep1, double ep2, double sigma1, double sigma2, double k1, double k2, double tau, double epsilon, double * ll) {
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx < N) {  // bounds guard: grid may overshoot N
		double out, intval[2] = {0.,0.};
		theta t = {.ep1 = ep1, .ep2 = ep2, .sig1 = sigma1, .sig2 = sigma2, .k1 = k1, .k2 = k2, .tau = tau};
		// Calculate the state contributions
		out = k2/(k1+k2) / sigma1 * exp(-k1*tau - .5 * pow((d[idx]-ep1)/sigma1,2.)); // state 1
		out += k1/(k1+k2) / sigma2 * exp(-k2*tau - .5 * pow((d[idx]-ep2)/sigma2,2.)); // state 2
		// Perform the blurring integral
		adaptive_integrate(0.,1.,intval,epsilon,d[idx],&t);
		out += 2.*k1*k2/(k1+k2)*tau *intval[0]; // the blurring contribution
		out = log(out) - .5 * log(2.* M_PI); // Add prefactor
		ll[idx] = out; // transfer out result
	}
}
// Host entry point (CUDA build): validates the model parameters, copies the N
// observations to `device`, launches kernel_loglikelihood, and copies the N
// per-datum log-likelihoods back into `ll`. Invalid parameters yield
// ll[i] = -INFINITY for all i instead of an error.
// The blocking cudaMemcpy back to `ll` also synchronizes with the kernel.
// NOTE(review): cudaSetDevice/cudaMalloc/cudaMemcpy return codes are ignored;
// an allocation failure would silently produce garbage — consider checking.
void log_likelihood(int device, int N, double * d, double ep1, double ep2, double sigma1, double sigma2, double k1, double k2, double tau, double epsilon, double * ll) {
	// Sanity checks from the model
	if ((ep1 < ep2) && (sigma1 > 0.) && (sigma2 > 0.) && (k1 > 0.) && (k2 > 0.) && (tau > 0.) && (epsilon > 0.)) {
		// Initialize CUDA things
		//get_cuda_errors();
		cudaSetDevice(device);
		//cudaDeviceProp deviceProp;
		//cudaGetDeviceProperties(&deviceProp, device);
		int threads = 256;//deviceProp.maxThreadsPerBlock/8;
		int blocks = (N+threads-1)/threads;  // ceil-div so all N data are covered
		double * d_d;
		double * ll_d;
		cudaMalloc((void**)&d_d,N*sizeof(double));
		cudaMalloc((void**)&ll_d,N*sizeof(double));
		cudaMemcpy(d_d,d,N*sizeof(double),cudaMemcpyHostToDevice);
		// Evaluate integrand at f -> store in y.
		kernel_loglikelihood<<<blocks,threads>>>(N,d_d,ep1,ep2,sigma1,sigma2,k1,k2,tau,epsilon,ll_d);
		cudaMemcpy(ll,ll_d,N*sizeof(double),cudaMemcpyDeviceToHost);
		cudaFree(d_d);
		cudaFree(ll_d);
		//get_cuda_errors();
	} else {
		int i;
		for (i=0;i<N;i++){ ll[i] = -INFINITY;}
	}
}
// Total BIASD log-likelihood: evaluates the per-datum log-likelihoods into a
// host scratch buffer via log_likelihood(), sums them, frees the buffer, and
// returns the total.
double sum_log_likelihood(int device, int N, double *d, double ep1, double ep2, double sigma1, double sigma2, double k1, double k2, double tau, double epsilon) {
	double *ll = (double *) malloc(N*sizeof(double));
	log_likelihood(device, N, d, ep1, ep2, sigma1, sigma2, k1, k2, tau, epsilon, ll);
	double total = 0.;
	for (int i = 0; i < N; i++) {
		total += ll[i];
	}
	free(ll);
	return total;
}
|
a1880a03e6a07f7b23ccd3ac3d7511bc0e5b2fef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"
#include "utils/math_utils.h"
#include "utils/cub_device.h"
#include "utils/math_functions.h"
namespace dragon {
namespace kernel {
// Reduces each row of the rows x cols matrix `x` over its columns, writing
// per-row mean and biased variance (E[x^2] - E[x]^2) to mean[i] / var[i].
// One block per row (outer loop macro), block-wide CUB reduction; only
// thread 0 of each block writes the result.
template <typename Tx, typename Ty>
__global__ void _ColwiseMoments(
    const int                   rows,
    const int                   cols,
    const Tx*                   x,
    Ty*                         mean,
    Ty*                         var) {
    __shared__ typename BlockReduce<Ty>::TempStorage m_storage;
    __shared__ typename BlockReduce<Ty>::TempStorage v_storage;
    const Ty scale = (Ty)1 / static_cast<Ty>(cols);
    CUDA_2D_KERNEL_LOOP1(i, rows) {
        Ty m_val = 0, v_val = 0;  // per-thread partial sum and sum of squares
        CUDA_2D_KERNEL_LOOP2(j, cols) {
            const int x_idx = i * cols + j;
#if __CUDA_ARCH__ >= 350
            // __ldg routes the read-only load through the texture cache.
            m_val += __ldg(x + x_idx);
            v_val += __ldg(x + x_idx) * __ldg(x + x_idx);
#else
            m_val += x[x_idx];
            v_val += x[x_idx] * x[x_idx];
#endif
        }
        m_val = BlockReduce<Ty>(m_storage).Sum(m_val);
        v_val = BlockReduce<Ty>(v_storage).Sum(v_val);
        if (threadIdx.x == 0) {
            const Ty mu = m_val * scale;
            mean[i] = mu;
            var[i] = v_val * scale - mu * mu;  // biased variance
        }
    }
}
// Half-precision specialization of _ColwiseMoments: loads are converted to
// float and accumulated in float. Guarded by SM53+ (__CUDA_ARCH__ >= 530,
// the first arch with fp16 support); on older devices the kernel body
// compiles to a no-op and the outputs are left untouched.
template<> __global__ void _ColwiseMoments<half, float>(
    const int                   rows,
    const int                   cols,
    const half*                 x,
    float*                      mean,
    float*                      var) {
#if __CUDA_ARCH__ >= 530
    __shared__ typename BlockReduce<float>::TempStorage m_storage;
    __shared__ typename BlockReduce<float>::TempStorage v_storage;
    const float scale = 1.f / cols;
    CUDA_2D_KERNEL_LOOP1(i, rows) {
        float m_val = 0.f, v_val = 0.f;
        CUDA_2D_KERNEL_LOOP2(j, cols) {
            const int x_idx = i * cols + j;
            m_val += __half2float(__ldg(x + x_idx));
            v_val += __half2float(__ldg(x + x_idx)) *
                     __half2float(__ldg(x + x_idx));
        }
        m_val = BlockReduce<float>(m_storage).Sum(m_val);
        v_val = BlockReduce<float>(v_storage).Sum(v_val);
        if (threadIdx.x == 0) {
            const float mu = m_val * scale;
            mean[i] = mu;
            var[i] = v_val * scale - mu * mu;
        }
    }
#endif
}
// Reduces each column of the rows x cols matrix `x` over its rows, writing
// per-column mean and biased variance to mean[i] / var[i]. One block per
// column; loads stride by `cols` (column walk), partials combined with a
// block-wide CUB reduction; only thread 0 writes.
template <typename Tx, typename Ty>
__global__ void _RowwiseMoments(
    const int                   rows,
    const int                   cols,
    const Tx*                   x,
    Ty*                         mean,
    Ty*                         var) {
    __shared__ typename BlockReduce<Ty>::TempStorage m_storage;
    __shared__ typename BlockReduce<Ty>::TempStorage v_storage;
    const Ty scale = (Ty)1 / static_cast<Ty>(rows);
    CUDA_2D_KERNEL_LOOP1(i, cols) {
        Ty m_val = 0, v_val = 0;  // per-thread partial sum and sum of squares
        CUDA_2D_KERNEL_LOOP2(j, rows) {
            const int x_idx = j * cols + i;  // walk down column i
#if __CUDA_ARCH__ >= 350
            m_val += __ldg(x + x_idx);
            v_val += __ldg(x + x_idx) * __ldg(x + x_idx);
#else
            m_val += x[x_idx];
            v_val += x[x_idx] * x[x_idx];
#endif
        }
        m_val = BlockReduce<Ty>(m_storage).Sum(m_val);
        v_val = BlockReduce<Ty>(v_storage).Sum(v_val);
        if (threadIdx.x == 0) {
            const Ty mu = m_val * scale;
            mean[i] = mu;
            var[i] = v_val * scale - mu * mu;  // biased variance
        }
    }
}
// Half-precision specialization of _RowwiseMoments: loads are converted to
// float and accumulated in float. Guarded by SM53+; on older devices the
// body compiles to a no-op and the outputs are left untouched.
template<> __global__ void _RowwiseMoments<half, float>(
    const int                   rows,
    const int                   cols,
    const half*                 x,
    float*                      mean,
    float*                      var) {
#if __CUDA_ARCH__ >= 530
    __shared__ typename BlockReduce<float>::TempStorage m_storage;
    __shared__ typename BlockReduce<float>::TempStorage v_storage;
    const float scale = 1.f / rows;
    CUDA_2D_KERNEL_LOOP1(i, cols) {
        float m_val = 0.f, v_val = 0.f;
        CUDA_2D_KERNEL_LOOP2(j, rows) {
            const int x_idx = j * cols + i;  // walk down column i
            m_val += __half2float(__ldg(x + x_idx));
            v_val += __half2float(__ldg(x + x_idx)) *
                     __half2float(__ldg(x + x_idx));
        }
        m_val = BlockReduce<float>(m_storage).Sum(m_val);
        v_val = BlockReduce<float>(v_storage).Sum(v_val);
        if (threadIdx.x == 0) {
            const float mu = m_val * scale;
            mean[i] = mu;
            var[i] = v_val * scale - mu * mu;
        }
    }
#endif
}
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
// General N-dimensional moments reduction: the tensor is viewed (via the
// transposed strides in `x_strides`) as [outer_dim, inner_dim], and each
// outer slice is reduced over its inner_dim elements. For every output
// element the flat index is decomposed against `y_dims` and mapped back to
// the source offset with `x_strides`. One block per outer slice; block-wide
// CUB reduction; only thread 0 writes mean[i] / var[i].
template <typename Tx, typename Ty>
__global__ void _GenericMoments(
    const int               ndims,
    const int               outer_dim,
    const int               inner_dim,
    const int*              x_strides,
    const int*              y_dims,
    const Tx*               x,
    Ty*                     mean,
    Ty*                     var) {
    __shared__ typename BlockReduce<Ty>::TempStorage m_storage;
    __shared__ typename BlockReduce<Ty>::TempStorage v_storage;
    const Ty scale = (Ty)1 / static_cast<Ty>(inner_dim);
    CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
        Ty m_val = 0, v_val = 0;
        CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
            // Decompose the logical index digit-by-digit (innermost first)
            // and accumulate the corresponding source offset.
            int x_idx = 0, y_idx = i * inner_dim + j;
#pragma unroll
            for (int d = ndims - 1; d >= 0; --d) {
                int r;
#if __CUDA_ARCH__ >= 350
                FIXED_DIVISOR_DIV_MOD(__ldg(y_dims + d), y_idx, &y_idx, &r);
                x_idx += r * __ldg(x_strides + d);
#else
                FIXED_DIVISOR_DIV_MOD(y_dims[d], y_idx, &y_idx, &r);
                x_idx += r * x_strides[d];
#endif
            }
#if __CUDA_ARCH__ >= 350
            m_val += __ldg(x + x_idx);
            v_val += __ldg(x + x_idx) * __ldg(x + x_idx);
#else
            m_val += x[x_idx];
            v_val += x[x_idx] * x[x_idx];
#endif
        }
        m_val = BlockReduce<Ty>(m_storage).Sum(m_val);
        v_val = BlockReduce<Ty>(v_storage).Sum(v_val);
        if (threadIdx.x == 0) {
            const Ty mu = m_val * scale;
            mean[i] = mu;
            var[i] = v_val * scale - mu * mu;  // biased variance
        }
    }
}
// Half-precision specialization of _GenericMoments: loads are converted to
// float and accumulated in float. Guarded by SM53+; on older devices the
// body compiles to a no-op and the outputs are left untouched.
template<> __global__ void _GenericMoments<half, float>(
    const int               ndims,
    const int               outer_dim,
    const int               inner_dim,
    const int*              x_strides,
    const int*              y_dims,
    const half*             x,
    float*                  mean,
    float*                  var) {
#if __CUDA_ARCH__ >= 530
    __shared__ typename BlockReduce<float>::TempStorage m_storage;
    __shared__ typename BlockReduce<float>::TempStorage v_storage;
    const float scale = 1.f / inner_dim;
    CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
        float m_val = 0.f, v_val = 0.f;
        CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
            // Decompose the logical index and map it to the source offset.
            int x_idx = 0, y_idx = i * inner_dim + j;
#pragma unroll
            for (int d = ndims - 1; d >= 0; --d) {
                int r;
                FIXED_DIVISOR_DIV_MOD(__ldg(y_dims + d), y_idx, &y_idx, &r);
                x_idx += r * __ldg(x_strides + d);
            }
            m_val += __half2float(__ldg(x + x_idx));
            v_val += __half2float(__ldg(x + x_idx)) *
                     __half2float(__ldg(x + x_idx));
        }
        m_val = BlockReduce<float>(m_storage).Sum(m_val);
        v_val = BlockReduce<float>(v_storage).Sum(v_val);
        if (threadIdx.x == 0) {
            const float mu = m_val * scale;
            mean[i] = mu;
            var[i] = v_val * scale - mu * mu;
        }
    }
#endif
}
// Host-side dispatcher: computes per-slice mean and biased variance of `x`
// reduced over `axes`, writing into `mean` / `var`.
// Picks the cheapest kernel: contiguous colwise/rowwise reductions when the
// reduced axes form a trailing/leading block (detected by the utils
// predicates), otherwise a generic transposed-stride reduction that stages
// the stride/dim tables in device memory.
// Fix vs. original: the unused `x_size` / `y_size` accumulations (computed
// on every call and never read) have been removed.
template <typename Tx, typename Ty>
void _Moments(
    const int               ndims,
    const int*              dims,
    const int               naxes,
    const int*              axes,
    const Tx*               x,
    Ty*                     mean,
    Ty*                     var,
    CUDAContext*            ctx) {
    // Output shape: each reduced axis collapses to 1.
    vector<int> y_dimsV(dims, dims + ndims);
    for (int i = 0; i < naxes; ++i) y_dimsV[axes[i]] = 1;
    const int* x_dims = dims; const int* y_dims = y_dimsV.data();
    int rows, cols;
    /*! Case #1: Colwise Reduce */
    if (utils::IsColwiseReduce(ndims, x_dims, y_dims, &rows, &cols)) {
        _ColwiseMoments<Tx, Ty>
            <<<CUDA_2D_BLOCKS(rows), CUDA_THREADS,
               0, ctx->cuda_stream()>>>
            (rows, cols, x, mean, var);
        return;
    }
    /*! Case #2: Rowwise Reduce */
    if (utils::IsRowwiseReduce(ndims, x_dims, y_dims, &rows, &cols)) {
        _RowwiseMoments<Tx, Ty>
            <<<CUDA_2D_BLOCKS(cols), CUDA_THREADS,
               0, ctx->cuda_stream()>>>
            (rows, cols, x, mean, var);
        return;
    }
    /*! Case #3: Generic Reduce */
    // Transpose so the reduced axes come last; pivot splits kept vs. reduced.
    vector<int> axesT(ndims), stridesT(ndims), dimsT(ndims);
    utils::ComputeTransposedAxesForReduce(
        ndims, naxes, axes, axesT.data());
    utils::ComputeTransposedStrides(
        ndims, dims, axesT.data(), stridesT.data());
    int outer_dim = 1, inner_dim = 1;
    const int pivot = ndims - naxes;
    for (int i = 0; i < pivot; ++i) outer_dim *= dims[axesT[i]];
    for (int i = pivot; i < ndims; ++i) inner_dim *= dims[axesT[i]];
    for (int i = 0; i < ndims; ++i) dimsT[i] = dims[axesT[i]];
    // Stage the stride/dim tables in device memory for the kernel.
    const int dbytes = sizeof(int) * ndims;
    int* XSS = (int*)ctx->New(dbytes), *YDS = (int*)ctx->New(dbytes);
    ctx->Memcpy<CUDAContext, CPUContext>(dbytes, XSS, stridesT.data());
    ctx->Memcpy<CUDAContext, CPUContext>(dbytes, YDS, dimsT.data());
    _GenericMoments<Tx, Ty>
        <<<CUDA_2D_BLOCKS(outer_dim), CUDA_THREADS,
           0, ctx->cuda_stream()>>>
        (ndims, outer_dim, inner_dim, XSS, YDS, x, mean, var);
    // Synchronize before releasing the staged tables.
    ctx->FinishDeviceCompution();
    ctx->Delete(XSS); ctx->Delete(YDS);
}
// Defines a full specialization of the public Moments<Tx, Ty, CUDAContext>
// entry point that simply forwards to the _Moments dispatcher above.
// NOTE: no comments may appear between the continuation lines below —
// they would break the backslash line splicing.
#define DEFINE_MOMENTS_KERNEL_LAUNCHER(Tx, Ty) \
    template <> void Moments<Tx, Ty, CUDAContext>( \
        const int ndims, \
        const int* dims, \
        const int naxes, \
        const int* axes, \
        const Tx* x, \
        Ty* mean, \
        Ty* var, \
        CUDAContext* ctx) { \
        _Moments<Tx, Ty>(ndims, dims, \
            naxes, axes, x, mean, var, ctx); \
    }
// Instantiate launchers for every supported input type. Statistics are
// emitted as float for all integral/float inputs; double inputs keep
// double outputs. (float16 gets a hand-written specialization below.)
DEFINE_MOMENTS_KERNEL_LAUNCHER(int8_t, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(uint8_t, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(int, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(int64_t, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(float, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(double, double);
// Specialization for float16 input: the buffer is reinterpreted as CUDA's
// `half` type so the half-specialized kernels above are reused, while the
// mean/variance outputs remain float.
template <> void Moments<float16, float, CUDAContext>(
    const int ndims,
    const int* dims,
    const int naxes,
    const int* axes,
    const float16* x,
    float* mean,
    float* var,
    CUDAContext* ctx) {
    const half* x_half = reinterpret_cast<const half*>(x);
    _Moments<half, float>(ndims, dims, naxes, axes, x_half, mean, var, ctx);
}
#undef FIXED_DIVISOR_DIV_MOD
#undef DEFINE_MOMENTS_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | a1880a03e6a07f7b23ccd3ac3d7511bc0e5b2fef.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"
#include "utils/math_utils.h"
#include "utils/cub_device.h"
#include "utils/math_functions.h"
namespace dragon {
namespace kernel {
template <typename Tx, typename Ty>
__global__ void _ColwiseMoments(
const int rows,
const int cols,
const Tx* x,
Ty* mean,
Ty* var) {
__shared__ typename BlockReduce<Ty>::TempStorage m_storage;
__shared__ typename BlockReduce<Ty>::TempStorage v_storage;
const Ty scale = (Ty)1 / static_cast<Ty>(cols);
CUDA_2D_KERNEL_LOOP1(i, rows) {
Ty m_val = 0, v_val = 0;
CUDA_2D_KERNEL_LOOP2(j, cols) {
const int x_idx = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(x + x_idx);
v_val += __ldg(x + x_idx) * __ldg(x + x_idx);
#else
m_val += x[x_idx];
v_val += x[x_idx] * x[x_idx];
#endif
}
m_val = BlockReduce<Ty>(m_storage).Sum(m_val);
v_val = BlockReduce<Ty>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const Ty mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template<> __global__ void _ColwiseMoments<half, float>(
const int rows,
const int cols,
const half* x,
float* mean,
float* var) {
#if __CUDA_ARCH__ >= 530
__shared__ typename BlockReduce<float>::TempStorage m_storage;
__shared__ typename BlockReduce<float>::TempStorage v_storage;
const float scale = 1.f / cols;
CUDA_2D_KERNEL_LOOP1(i, rows) {
float m_val = 0.f, v_val = 0.f;
CUDA_2D_KERNEL_LOOP2(j, cols) {
const int x_idx = i * cols + j;
m_val += __half2float(__ldg(x + x_idx));
v_val += __half2float(__ldg(x + x_idx)) *
__half2float(__ldg(x + x_idx));
}
m_val = BlockReduce<float>(m_storage).Sum(m_val);
v_val = BlockReduce<float>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const float mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
#endif
}
template <typename Tx, typename Ty>
__global__ void _RowwiseMoments(
const int rows,
const int cols,
const Tx* x,
Ty* mean,
Ty* var) {
__shared__ typename BlockReduce<Ty>::TempStorage m_storage;
__shared__ typename BlockReduce<Ty>::TempStorage v_storage;
const Ty scale = (Ty)1 / static_cast<Ty>(rows);
CUDA_2D_KERNEL_LOOP1(i, cols) {
Ty m_val = 0, v_val = 0;
CUDA_2D_KERNEL_LOOP2(j, rows) {
const int x_idx = j * cols + i;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(x + x_idx);
v_val += __ldg(x + x_idx) * __ldg(x + x_idx);
#else
m_val += x[x_idx];
v_val += x[x_idx] * x[x_idx];
#endif
}
m_val = BlockReduce<Ty>(m_storage).Sum(m_val);
v_val = BlockReduce<Ty>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const Ty mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template<> __global__ void _RowwiseMoments<half, float>(
const int rows,
const int cols,
const half* x,
float* mean,
float* var) {
#if __CUDA_ARCH__ >= 530
__shared__ typename BlockReduce<float>::TempStorage m_storage;
__shared__ typename BlockReduce<float>::TempStorage v_storage;
const float scale = 1.f / rows;
CUDA_2D_KERNEL_LOOP1(i, cols) {
float m_val = 0.f, v_val = 0.f;
CUDA_2D_KERNEL_LOOP2(j, rows) {
const int x_idx = j * cols + i;
m_val += __half2float(__ldg(x + x_idx));
v_val += __half2float(__ldg(x + x_idx)) *
__half2float(__ldg(x + x_idx));
}
m_val = BlockReduce<float>(m_storage).Sum(m_val);
v_val = BlockReduce<float>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const float mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
#endif
}
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
template <typename Tx, typename Ty>
__global__ void _GenericMoments(
const int ndims,
const int outer_dim,
const int inner_dim,
const int* x_strides,
const int* y_dims,
const Tx* x,
Ty* mean,
Ty* var) {
__shared__ typename BlockReduce<Ty>::TempStorage m_storage;
__shared__ typename BlockReduce<Ty>::TempStorage v_storage;
const Ty scale = (Ty)1 / static_cast<Ty>(inner_dim);
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
Ty m_val = 0, v_val = 0;
CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
int x_idx = 0, y_idx = i * inner_dim + j;
#pragma unroll
for (int d = ndims - 1; d >= 0; --d) {
int r;
#if __CUDA_ARCH__ >= 350
FIXED_DIVISOR_DIV_MOD(__ldg(y_dims + d), y_idx, &y_idx, &r);
x_idx += r * __ldg(x_strides + d);
#else
FIXED_DIVISOR_DIV_MOD(y_dims[d], y_idx, &y_idx, &r);
x_idx += r * x_strides[d];
#endif
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(x + x_idx);
v_val += __ldg(x + x_idx) * __ldg(x + x_idx);
#else
m_val += x[x_idx];
v_val += x[x_idx] * x[x_idx];
#endif
}
m_val = BlockReduce<Ty>(m_storage).Sum(m_val);
v_val = BlockReduce<Ty>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const Ty mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template<> __global__ void _GenericMoments<half, float>(
const int ndims,
const int outer_dim,
const int inner_dim,
const int* x_strides,
const int* y_dims,
const half* x,
float* mean,
float* var) {
#if __CUDA_ARCH__ >= 530
__shared__ typename BlockReduce<float>::TempStorage m_storage;
__shared__ typename BlockReduce<float>::TempStorage v_storage;
const float scale = 1.f / inner_dim;
CUDA_2D_KERNEL_LOOP1(i, outer_dim) {
float m_val = 0.f, v_val = 0.f;
CUDA_2D_KERNEL_LOOP2(j, inner_dim) {
int x_idx = 0, y_idx = i * inner_dim + j;
#pragma unroll
for (int d = ndims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(__ldg(y_dims + d), y_idx, &y_idx, &r);
x_idx += r * __ldg(x_strides + d);
}
m_val += __half2float(__ldg(x + x_idx));
v_val += __half2float(__ldg(x + x_idx)) *
__half2float(__ldg(x + x_idx));
}
m_val = BlockReduce<float>(m_storage).Sum(m_val);
v_val = BlockReduce<float>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const float mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
#endif
}
template <typename Tx, typename Ty>
void _Moments(
const int ndims,
const int* dims,
const int naxes,
const int* axes,
const Tx* x,
Ty* mean,
Ty* var,
CUDAContext* ctx) {
vector<int> y_dimsV(dims, dims + ndims);
for (int i = 0; i < naxes; ++i) y_dimsV[axes[i]] = 1;
const int* x_dims = dims; const int* y_dims = y_dimsV.data();
const int x_size = std::accumulate(x_dims,
x_dims + ndims, 1, std::multiplies<int>());
const int y_size = std::accumulate(y_dims,
y_dims + ndims, 1, std::multiplies<int>());
int rows, cols;
/*! Case #1: Colwise Reduce */
if (utils::IsColwiseReduce(ndims, x_dims, y_dims, &rows, &cols)) {
_ColwiseMoments<Tx, Ty>
<< < CUDA_2D_BLOCKS(rows), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(rows, cols, x, mean, var); return;
}
/*! Case #2: Rowwise Reduce */
if (utils::IsRowwiseReduce(ndims, x_dims, y_dims, &rows, &cols)) {
_RowwiseMoments<Tx, Ty>
<< < CUDA_2D_BLOCKS(cols), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(rows, cols, x, mean, var); return;
}
/*! Case #3: Generic Reduce */
vector<int> axesT(ndims), stridesT(ndims), dimsT(ndims);
utils::ComputeTransposedAxesForReduce(
ndims, naxes, axes, axesT.data());
utils::ComputeTransposedStrides(
ndims, dims, axesT.data(), stridesT.data());
int outer_dim = 1, inner_dim = 1;
const int pivot = ndims - naxes;
for (int i = 0; i < pivot; ++i) outer_dim *= dims[axesT[i]];
for (int i = pivot; i < ndims; ++i) inner_dim *= dims[axesT[i]];
for (int i = 0; i < ndims; ++i) dimsT[i] = dims[axesT[i]];
const int dbytes = sizeof(int) * ndims;
int* XSS = (int*)ctx->New(dbytes), *YDS = (int*)ctx->New(dbytes);
ctx->Memcpy<CUDAContext, CPUContext>(dbytes, XSS, stridesT.data());
ctx->Memcpy<CUDAContext, CPUContext>(dbytes, YDS, dimsT.data());
_GenericMoments<Tx, Ty>
<< < CUDA_2D_BLOCKS(outer_dim), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(ndims, outer_dim, inner_dim, XSS, YDS, x, mean, var);
ctx->FinishDeviceCompution();
ctx->Delete(XSS); ctx->Delete(YDS);
}
#define DEFINE_MOMENTS_KERNEL_LAUNCHER(Tx, Ty) \
template <> void Moments<Tx, Ty, CUDAContext>( \
const int ndims, \
const int* dims, \
const int naxes, \
const int* axes, \
const Tx* x, \
Ty* mean, \
Ty* var, \
CUDAContext* ctx) { \
_Moments<Tx, Ty>(ndims, dims, \
naxes, axes, x, mean, var, ctx); \
}
DEFINE_MOMENTS_KERNEL_LAUNCHER(int8_t, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(uint8_t, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(int, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(int64_t, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(float, float);
DEFINE_MOMENTS_KERNEL_LAUNCHER(double, double);
template <> void Moments<float16, float, CUDAContext>(
const int ndims,
const int* dims,
const int naxes,
const int* axes,
const float16* x,
float* mean,
float* var,
CUDAContext* ctx) {
_Moments<half, float>(ndims, dims, naxes, axes,
reinterpret_cast<const half*>(x), mean, var, ctx);
}
#undef FIXED_DIVISOR_DIV_MOD
#undef DEFINE_MOMENTS_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
17ba59ceb4e0ed575e15ccdaa5ba6d641edfa85d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/arithmetic.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
/******************************* add operation *******************************/
__global__
void addKernel0(const uchar* src0, int rows, int cols, int src0_stride,
const uchar* src1, int src1_stride, uchar* dst,
int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const uchar4* input0 = (uchar4*)(src0 + element_y * src0_stride);
const uchar4* input1 = (uchar4*)(src1 + element_y * src1_stride);
uchar4 input_value0 = input0[element_x];
uchar4 input_value1 = input1[element_x];
uchar4 output_value;
output_value.x = saturate_cast((int)input_value0.x + input_value1.x);
output_value.y = saturate_cast((int)input_value0.y + input_value1.y);
output_value.z = saturate_cast((int)input_value0.z + input_value1.z);
output_value.w = saturate_cast((int)input_value0.w + input_value1.w);
uchar4* output = (uchar4*)(dst + element_y * dst_stride);
output[element_x] = output_value;
}
__global__
void addKernel10(const uchar* src0, int cols, const uchar* src1, uchar* dst) {
int element_x = (blockIdx.x << 8) + threadIdx.x;
int index_x = element_x << 2;
if (index_x >= cols) {
return;
}
const uchar4* input0 = (uchar4*)src0;
const uchar4* input1 = (uchar4*)src1;
uchar4 input_value0, input_value1, output_value;
input_value0 = input0[element_x];
input_value1 = input1[element_x];
if (index_x < cols - 4) {
output_value.x = saturate_cast((int)input_value0.x + input_value1.x);
output_value.y = saturate_cast((int)input_value0.y + input_value1.y);
output_value.z = saturate_cast((int)input_value0.z + input_value1.z);
output_value.w = saturate_cast((int)input_value0.w + input_value1.w);
uchar4* output = (uchar4*)dst;
output[element_x] = output_value;
}
else {
output_value.x = saturate_cast((int)input_value0.x + input_value1.x);
if (index_x < cols - 1) {
output_value.y = saturate_cast((int)input_value0.y + input_value1.y);
}
if (index_x < cols - 2) {
output_value.z = saturate_cast((int)input_value0.z + input_value1.z);
}
if (index_x < cols - 3) {
output_value.w = saturate_cast((int)input_value0.w + input_value1.w);
}
dst[index_x] = output_value.x;
if (index_x < cols - 1) {
dst[index_x + 1] = output_value.y;
}
if (index_x < cols - 2) {
dst[index_x + 2] = output_value.z;
}
if (index_x < cols - 3) {
dst[index_x + 3] = output_value.w;
}
}
}
__global__
void addKernel11(const uchar* src0, int rows, int cols, int src0_stride,
const uchar* src1, int src1_stride, uchar* dst,
int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int index_x = element_x << 2;
if (element_y >= rows || index_x >= cols) {
return;
}
const uchar* input0 = src0 + element_y * src0_stride;
const uchar* input1 = src1 + element_y * src1_stride;
uchar* output = dst + element_y * dst_stride;
uchar input_value00, input_value01, input_value02, input_value03;
uchar input_value10, input_value11, input_value12, input_value13;
int output_value0, output_value1, output_value2, output_value3;
if (blockIdx.x < gridDim.x - 1) {
input_value00 = input0[index_x];
input_value01 = input0[index_x + 1];
input_value02 = input0[index_x + 2];
input_value03 = input0[index_x + 3];
input_value10 = input1[index_x];
input_value11 = input1[index_x + 1];
input_value12 = input1[index_x + 2];
input_value13 = input1[index_x + 3];
output_value0 = input_value00 + input_value10;
output_value1 = input_value01 + input_value11;
output_value2 = input_value02 + input_value12;
output_value3 = input_value03 + input_value13;
output[index_x] = saturate_cast(output_value0);
output[index_x + 1] = saturate_cast(output_value1);
output[index_x + 2] = saturate_cast(output_value2);
output[index_x + 3] = saturate_cast(output_value3);
}
else {
input_value00 = input0[index_x];
if (index_x < cols - 1) {
input_value01 = input0[index_x + 1];
}
if (index_x < cols - 2) {
input_value02 = input0[index_x + 2];
}
if (index_x < cols - 3) {
input_value03 = input0[index_x + 3];
}
input_value10 = input1[index_x];
if (index_x < cols - 1) {
input_value11 = input1[index_x + 1];
}
if (index_x < cols - 2) {
input_value12 = input1[index_x + 2];
}
if (index_x < cols - 3) {
input_value13 = input1[index_x + 3];
}
output_value0 = input_value00 + input_value10;
if (index_x < cols - 1) {
output_value1 = input_value01 + input_value11;
}
if (index_x < cols - 2) {
output_value2 = input_value02 + input_value12;
}
if (index_x < cols - 3) {
output_value3 = input_value03 + input_value13;
}
output[index_x] = saturate_cast(output_value0);
if (index_x < cols - 1) {
output[index_x + 1] = saturate_cast(output_value1);
}
if (index_x < cols - 2) {
output[index_x + 2] = saturate_cast(output_value2);
}
if (index_x < cols - 3) {
output[index_x + 3] = saturate_cast(output_value3);
}
}
}
__global__
void addKernel0(const float* src0, int rows, int cols, int src0_stride,
const float* src1, int src1_stride, float* dst,
int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const float2* input0 = (float2*)((uchar*)src0 + element_y * src0_stride);
const float2* input1 = (float2*)((uchar*)src1 + element_y * src1_stride);
float2 input_value0 = input0[element_x];
float2 input_value1 = input1[element_x];
float2 output_value;
output_value.x = input_value0.x + input_value1.x;
output_value.y = input_value0.y + input_value1.y;
float2* output = (float2*)((uchar*)dst + element_y * dst_stride);
output[element_x] = output_value;
}
__global__
void addKernel1(const float* src0, int rows, int cols, int src0_stride,
const float* src1, int src1_stride, float* dst,
int dst_stride) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const float* input0 = (float*)((uchar*)src0 + element_y * src0_stride);
const float* input1 = (float*)((uchar*)src1 + element_y * src1_stride);
float* output = (float*)((uchar*)dst + element_y * dst_stride);
float input_value00, input_value01;
float input_value10, input_value11;
float output_value0, output_value1;
if (blockIdx.x < gridDim.x - 1) {
input_value00 = input0[element_x];
input_value01 = input0[element_x + 1];
input_value10 = input1[element_x];
input_value11 = input1[element_x + 1];
output_value0 = input_value00 + input_value10;
output_value1 = input_value01 + input_value11;
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
input_value00 = input0[element_x];
if (element_x != cols - 1) {
input_value01 = input0[element_x + 1];
}
input_value10 = input1[element_x];
if (element_x != cols - 1) {
input_value11 = input1[element_x + 1];
}
output_value0 = input_value00 + input_value10;
if (element_x != cols - 1) {
output_value1 = input_value01 + input_value11;
}
output[element_x] = output_value0;
if (element_x != cols - 1) {
output[element_x + 1] = output_value1;
}
}
}
RetCode add(const uchar* src0, int rows, int cols, int channels,
int src0_stride, const uchar* src1, int src1_stride, uchar* dst,
int dst_stride, hipStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
int columns = cols * channels;
cols = divideUp(columns, 4, 2);
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
if ((src0_stride & 3) == 0 && (src1_stride & 3) == 0 &&
(dst_stride & 3) == 0) {
hipLaunchKernelGGL(( addKernel0), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride, src1,
src1_stride, dst, dst_stride);
}
else if (src0_stride == columns && src1_stride == columns &&
dst_stride == columns ) {
columns *= rows;
cols = divideUp(columns, 4, 2);
block.x = 256;
block.y = 1;
grid.x = divideUp(cols, 256, 8);
grid.y = 1;
hipLaunchKernelGGL(( addKernel10), dim3(grid), dim3(block), 0, stream, src0, columns, src1, dst);
}
else {
hipLaunchKernelGGL(( addKernel11), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride, src1,
src1_stride, dst, dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode add(const float* src0, int rows, int cols, int channels,
int src0_stride, const float* src1, int src1_stride, float* dst,
int dst_stride, hipStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
(dst_stride & 7) == 0) {
cols = divideUp(columns, 2, 1);
hipLaunchKernelGGL(( addKernel0), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride,
src1, src1_stride, dst, dst_stride);
}
else {
hipLaunchKernelGGL(( addKernel1), dim3(grid), dim3(block), 0, stream, src0, rows, columns, src0_stride,
src1, src1_stride, dst, dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode Add<uchar, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const uchar* inData0,
int inWidthStride1,
const uchar* inData1,
int outWidthStride,
uchar* outData) {
RetCode code = add(inData0, height, width, 1, inWidthStride0, inData1,
inWidthStride1, outData, outWidthStride, stream);
return code;
}
template <>
RetCode Add<uchar, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const uchar* inData0,
int inWidthStride1,
const uchar* inData1,
int outWidthStride,
uchar* outData) {
RetCode code = add(inData0, height, width, 3, inWidthStride0, inData1,
inWidthStride1, outData, outWidthStride, stream);
return code;
}
template <>
RetCode Add<uchar, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const uchar* inData0,
int inWidthStride1,
const uchar* inData1,
int outWidthStride,
uchar* outData) {
RetCode code = add(inData0, height, width, 4, inWidthStride0, inData1,
inWidthStride1, outData, outWidthStride, stream);
return code;
}
template <>
RetCode Add<float, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const float* inData0,
int inWidthStride1,
const float* inData1,
int outWidthStride,
float* outData) {
inWidthStride0 *= sizeof(float);
inWidthStride1 *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = add(inData0, height, width, 1, inWidthStride0, inData1,
inWidthStride1, outData, outWidthStride, stream);
return code;
}
template <>
RetCode Add<float, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const float* inData0,
int inWidthStride1,
const float* inData1,
int outWidthStride,
float* outData) {
inWidthStride0 *= sizeof(float);
inWidthStride1 *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = add(inData0, height, width, 3, inWidthStride0, inData1,
inWidthStride1, outData, outWidthStride, stream);
return code;
}
template <>
RetCode Add<float, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const float* inData0,
int inWidthStride1,
const float* inData1,
int outWidthStride,
float* outData) {
inWidthStride0 *= sizeof(float);
inWidthStride1 *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = add(inData0, height, width, 4, inWidthStride0, inData1,
inWidthStride1, outData, outWidthStride, stream);
return code;
}
/*************************** addWeighted operation ***************************/
template <typename T0, typename T1>
__global__
void addWeightedKernel0(const T0* src0, int rows, int cols, int src0_stride,
float alpha, const T0* src1, int src1_stride,
float beta, float gamma, T0* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const T1* input0 = (T1*)((uchar*)src0 + element_y * src0_stride);
const T1* input1 = (T1*)((uchar*)src1 + element_y * src1_stride);
T1* output = (T1*)((uchar*)dst + element_y * dst_stride);
T1 input_value0 = input0[element_x];
T1 input_value1 = input1[element_x];
float2 output_value0 = make_float2(gamma, gamma);
output_value0.x += input_value0.x * alpha;
output_value0.y += input_value0.y * alpha;
output_value0.x += input_value1.x * beta;
output_value0.y += input_value1.y * beta;
output[element_x] = saturate_cast_vector<T1, float2>(output_value0);
}
template <typename T>
__global__
void addWeightedKernel1(const T* src0, int rows, int cols, int src0_stride,
float alpha, const T* src1, int src1_stride,
float beta, float gamma, T* dst, int dst_stride) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const T* input0 = (T*)((uchar*)src0 + element_y * src0_stride);
const T* input1 = (T*)((uchar*)src1 + element_y * src1_stride);
T* output = (T*)((uchar*)dst + element_y * dst_stride);
T input_value00, input_value01;
T input_value10, input_value11;
float output_value0 = gamma;
float output_value1 = gamma;
if (blockIdx.x < gridDim.x - 1) {
input_value00 = input0[element_x];
input_value01 = input0[element_x + 1];
input_value10 = input1[element_x];
input_value11 = input1[element_x + 1];
output_value0 += input_value00 * alpha;
output_value1 += input_value01 * alpha;
output_value0 += input_value10 * beta;
output_value1 += input_value11 * beta;
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
input_value00 = input0[element_x];
if (element_x != cols - 1) {
input_value01 = input0[element_x + 1];
}
input_value10 = input1[element_x];
if (element_x != cols - 1) {
input_value11 = input1[element_x + 1];
}
output_value0 += input_value00 * alpha;
output_value0 += input_value10 * beta;
if (element_x != cols - 1) {
output_value1 += input_value01 * alpha;
output_value1 += input_value11 * beta;
}
if (sizeof(T) == 1) {
output[element_x] = saturate_cast(output_value0);
if (element_x != cols - 1) {
output[element_x + 1] = saturate_cast(output_value1);
}
}
else {
output[element_x] = output_value0;
if (element_x != cols - 1) {
output[element_x + 1] = output_value1;
}
}
}
}
RetCode addWeighted(const uchar* src0, int rows, int cols, int channels,
int src0_stride, float alpha, const uchar* src1,
int src1_stride, float beta, float gamma, uchar* dst,
int dst_stride, hipStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
if ((src0_stride & 1) == 0 && (src1_stride & 1) == 0 &&
(dst_stride & 1) == 0) {
cols = divideUp(columns, 2, 1);
hipLaunchKernelGGL(( addWeightedKernel0<uchar, uchar2>), dim3(grid), dim3(block), 0, stream, src0, rows,
cols, src0_stride, alpha, src1, src1_stride, beta, gamma, dst,
dst_stride);
}
else {
hipLaunchKernelGGL(( addWeightedKernel1<uchar>), dim3(grid), dim3(block), 0, stream, src0, rows, columns,
src0_stride, alpha, src1, src1_stride, beta, gamma, dst, dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode addWeighted(const float* src0, int rows, int cols, int channels,
int src0_stride, float alpha, const float* src1,
int src1_stride, float beta, float gamma, float* dst,
int dst_stride, hipStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
(dst_stride & 7) == 0) {
cols = divideUp(columns, 2, 1);
hipLaunchKernelGGL(( addWeightedKernel0<float, float2>), dim3(grid), dim3(block), 0, stream, src0, rows,
cols, src0_stride, alpha, src1, src1_stride, beta, gamma, dst,
dst_stride);
}
else {
hipLaunchKernelGGL(( addWeightedKernel1<float>), dim3(grid), dim3(block), 0, stream, src0, rows, columns,
src0_stride, alpha, src1, src1_stride, beta, gamma, dst, dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode AddWeighted<uchar, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const uchar* inData0,
float alpha,
int inWidthStride1,
const uchar* inData1,
float beta,
float gamma,
int outWidthStride,
uchar* outData) {
RetCode code = addWeighted(inData0, height, width, 1, inWidthStride0, alpha,
inData1, inWidthStride1, beta, gamma, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode AddWeighted<uchar, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const uchar* inData0,
float alpha,
int inWidthStride1,
const uchar* inData1,
float beta,
float gamma,
int outWidthStride,
uchar* outData) {
RetCode code = addWeighted(inData0, height, width, 3, inWidthStride0, alpha,
inData1, inWidthStride1, beta, gamma, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode AddWeighted<uchar, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const uchar* inData0,
float alpha,
int inWidthStride1,
const uchar* inData1,
float beta,
float gamma,
int outWidthStride,
uchar* outData) {
RetCode code = addWeighted(inData0, height, width, 4, inWidthStride0, alpha,
inData1, inWidthStride1, beta, gamma, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode AddWeighted<float, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const float* inData0,
float alpha,
int inWidthStride1,
const float* inData1,
float beta,
float gamma,
int outWidthStride,
float* outData) {
inWidthStride0 *= sizeof(float);
inWidthStride1 *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = addWeighted(inData0, height, width, 1, inWidthStride0, alpha,
inData1, inWidthStride1, beta, gamma, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode AddWeighted<float, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride0,
const float* inData0,
float alpha,
int inWidthStride1,
const float* inData1,
float beta,
float gamma,
int outWidthStride,
float* outData) {
inWidthStride0 *= sizeof(float);
inWidthStride1 *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = addWeighted(inData0, height, width, 3, inWidthStride0, alpha,
inData1, inWidthStride1, beta, gamma, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode AddWeighted<float, 4>(hipStream_t stream, int height, int width,
                              int inWidthStride0, const float* inData0,
                              float alpha, int inWidthStride1,
                              const float* inData1, float beta, float gamma,
                              int outWidthStride, float* outData) {
  // Public strides are expressed in float elements; the implementation
  // expects bytes, so scale them by sizeof(float) on the way in.
  return addWeighted(inData0, height, width, 4,
                     inWidthStride0 * (int)sizeof(float), alpha, inData1,
                     inWidthStride1 * (int)sizeof(float), beta, gamma, outData,
                     outWidthStride * (int)sizeof(float), stream);
}
/**************************** subtract operation *****************************/
// Subtracts a per-channel scalar from a 2-D image, four T0 elements per
// thread. T1 must be the 4-wide vector type of T0 (e.g. uchar4 / float4);
// cols is given in T1 (vector) units and src_stride/dst_stride are in bytes.
// Expects the 2-D launch configuration built by the host subtract() wrappers.
template <typename T0, typename T1>
__global__
void subtractKernel0(const T0* src, int rows, int cols, int channels,
                     int src_stride, T0 scalar0, T0 scalar1, T0 scalar2,
                     T0 scalar3, T0* dst, int dst_stride) {
  int element_x, element_y;
  // 1-byte element types use the X0/Y0 block-shift constants; wider types
  // use X1/Y1, matching the host-side grid computation.
  if (sizeof(T0) == 1) {
    element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
    element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  }
  else {
    element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
    element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
  }
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T1* input = (T1*)((uchar*)src + element_y * src_stride);
  T1* output = (T1*)((uchar*)dst + element_y * dst_stride);
  T1 input_value = input[element_x];
  T0 value0, value1, value2, value3;
  T1 output_value;
  if (channels == 1) {
    // Same scalar for every lane.
    value0 = scalar0;
    value1 = scalar0;
    value2 = scalar0;
    value3 = scalar0;
  }
  else if (channels == 3) {
    // element_x << 2 is the flat index of lane .x; its value modulo 3 tells
    // which of the three channels this vector starts on, and the four lanes
    // then cycle through the channel scalars from there.
    int value = (element_x << 2) % 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
      value2 = scalar2;
      value3 = scalar0;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
      value2 = scalar0;
      value3 = scalar1;
    }
    else {
      value0 = scalar2;
      value1 = scalar0;
      value2 = scalar1;
      value3 = scalar2;
    }
  }
  else { // channels == 4: one vector covers exactly one pixel
    value0 = scalar0;
    value1 = scalar1;
    value2 = scalar2;
    value3 = scalar3;
  }
  if (sizeof(T0) == 1) {
    // 1-byte types need a saturating cast back into the uchar range.
    output_value.x = saturate_cast(input_value.x - value0);
    output_value.y = saturate_cast(input_value.y - value1);
    output_value.z = saturate_cast(input_value.z - value2);
    output_value.w = saturate_cast(input_value.w - value3);
  }
  else {
    output_value.x = input_value.x - value0;
    output_value.y = input_value.y - value1;
    output_value.z = input_value.z - value2;
    output_value.w = input_value.w - value3;
  }
  output[element_x] = output_value;
}
// 1-D variant of subtractKernel0 for fully packed images (stride == row
// width): the whole image is treated as one contiguous array of cols T0
// elements. Launched with 256-thread blocks (hence the << 8); each thread
// handles one 4-wide vector, falling back to guarded scalar stores on the
// final, possibly partial, vector.
template <typename T0, typename T1>
__global__
void subtractKernel10(const T0* src, int cols, int channels, T0 scalar0,
                      T0 scalar1, T0 scalar2, T0 scalar3, T0* dst) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;
  // index_x is the flat element index of this thread's lane .x.
  int index_x = element_x << 2;
  if (index_x >= cols) {
    return;
  }
  const T1* input = (T1*)((uchar*)src);
  T1 input_value = input[element_x];
  T0 value0, value1, value2, value3;
  T1 output_value;
  if (channels == 1) {
    value0 = scalar0;
    value1 = scalar0;
    value2 = scalar0;
    value3 = scalar0;
  }
  else if (channels == 3) {
    // Channel of lane .x; the four lanes cycle through the 3 channels.
    int value = (element_x << 2) % 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
      value2 = scalar2;
      value3 = scalar0;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
      value2 = scalar0;
      value3 = scalar1;
    }
    else {
      value0 = scalar2;
      value1 = scalar0;
      value2 = scalar1;
      value3 = scalar2;
    }
  }
  else { // channels == 4
    value0 = scalar0;
    value1 = scalar1;
    value2 = scalar2;
    value3 = scalar3;
  }
  if (index_x < cols - 4) {
    // Interior vector: all four lanes are valid, store as one T1.
    if (sizeof(T0) == 1) {
      output_value.x = saturate_cast(input_value.x - value0);
      output_value.y = saturate_cast(input_value.y - value1);
      output_value.z = saturate_cast(input_value.z - value2);
      output_value.w = saturate_cast(input_value.w - value3);
    }
    else {
      output_value.x = input_value.x - value0;
      output_value.y = input_value.y - value1;
      output_value.z = input_value.z - value2;
      output_value.w = input_value.w - value3;
    }
    T1* output = (T1*)((uchar*)dst);
    output[element_x] = output_value;
  }
  else {
    // Tail vector: compute and store only the in-range lanes, one scalar
    // at a time, so nothing past cols is written.
    if (sizeof(T0) == 1) {
      output_value.x = saturate_cast(input_value.x - value0);
      if (index_x < cols - 1) {
        output_value.y = saturate_cast(input_value.y - value1);
      }
      if (index_x < cols - 2) {
        output_value.z = saturate_cast(input_value.z - value2);
      }
      if (index_x < cols - 3) {
        output_value.w = saturate_cast(input_value.w - value3);
      }
    }
    else {
      output_value.x = input_value.x - value0;
      if (index_x < cols - 1) {
        output_value.y = input_value.y - value1;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value.z - value2;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value.w - value3;
      }
    }
    dst[index_x] = output_value.x;
    if (index_x < cols - 1) {
      dst[index_x + 1] = output_value.y;
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = output_value.z;
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = output_value.w;
    }
  }
}
// Fallback subtract kernel for rows whose strides do not allow vector
// loads: each thread processes two scalar T elements. cols here is the row
// width in elements (cols * channels on the host side); strides are bytes.
template <typename T>
__global__
void subtractKernel11(const T* src, int rows, int cols, int channels,
                      int src_stride, T scalar0, T scalar1, T scalar2,
                      T scalar3, T* dst, int dst_stride) {
  int element_x, element_y;
  // Each thread covers elements element_x and element_x + 1.
  if (sizeof(T) == 1) {
    element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
    element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  }
  else {
    element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 1;
    element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
  }
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T* input = (T*)((uchar*)src + element_y * src_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value0, input_value1;
  T value0, value1;
  T output_value0, output_value1;
  if (channels == 1) {
    value0 = scalar0;
    value1 = scalar0;
  }
  else if (channels == 3) {
    // Channel index of the first element; the pair cycles through the
    // three channel scalars.
    int value = element_x % 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
    }
    else {
      value0 = scalar2;
      value1 = scalar0;
    }
  }
  else { // channels == 4: & 3 is the cheap modulo-4
    int value = element_x & 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
    }
    else if (value == 2) {
      value0 = scalar2;
      value1 = scalar3;
    }
    else {
      value0 = scalar3;
      value1 = scalar0;
    }
  }
  if (blockIdx.x < gridDim.x - 1) {
    // Interior blocks: both elements are guaranteed in range.
    input_value0 = input[element_x];
    input_value1 = input[element_x + 1];
    if (sizeof(T) == 1) {
      output_value0 = saturate_cast(input_value0 - value0);
      output_value1 = saturate_cast(input_value1 - value1);
    }
    else {
      output_value0 = input_value0 - value0;
      output_value1 = input_value1 - value1;
    }
    output[element_x] = output_value0;
    output[element_x + 1] = output_value1;
  }
  else {
    // Last block along x: the second element may fall off the row end, so
    // every access to element_x + 1 is guarded.
    input_value0 = input[element_x];
    if (element_x != cols - 1) {
      input_value1 = input[element_x + 1];
    }
    if (element_x != cols - 1) {
      if (sizeof(T) == 1) {
        output_value0 = saturate_cast(input_value0 - value0);
        output_value1 = saturate_cast(input_value1 - value1);
      }
      else {
        output_value0 = input_value0 - value0;
        output_value1 = input_value1 - value1;
      }
    }
    else {
      if (sizeof(T) == 1) {
        output_value0 = saturate_cast(input_value0 - value0);
      }
      else {
        output_value0 = input_value0 - value0;
      }
    }
    output[element_x] = output_value0;
    if (element_x != cols - 1) {
      output[element_x + 1] = output_value1;
    }
  }
}
RetCode subtract(const uchar* src, int rows, int cols, int channels,
                 int src_stride, const uchar* scalar, uchar* dst,
                 int dst_stride, hipStream_t stream) {
  // Subtracts a per-channel scalar from every element of a uchar image.
  // Strides are in bytes. Dispatches to the widest kernel the strides allow:
  // uchar4 2-D, uchar4 flattened 1-D, or a scalar 2-per-thread fallback.
  PPL_ASSERT(src != nullptr);
  PPL_ASSERT(scalar != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));

  int columns = cols * channels;
  // Unpack the scalar; entries beyond the channel count stay 0 (unused).
  uchar value0 = scalar[0], value1 = 0, value2 = 0, value3 = 0;
  if (channels > 1) {
    value1 = scalar[1];
    value2 = scalar[2];
  }
  if (channels == 4) {
    value3 = scalar[3];
  }

  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  if ((src_stride & 3) == 0 && (dst_stride & 3) == 0) {
    // Rows are 4-byte aligned: process one uchar4 per thread.
    cols = divideUp(columns, 4, 2);
    hipLaunchKernelGGL(( subtractKernel0<uchar, uchar4>), dim3(grid), dim3(block), 0, stream, src, rows, cols,
        channels, src_stride, value0, value1, value2, value3, dst, dst_stride);
  }
  else if (src_stride == columns && dst_stride == columns) {
    // Fully packed image: flatten it and run a 1-D kernel.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    hipLaunchKernelGGL(( subtractKernel10<uchar, uchar4>), dim3(grid), dim3(block), 0, stream, src, columns,
        channels, value0, value1, value2, value3, dst);
  }
  else {
    // Misaligned strides: scalar kernel processing two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
    hipLaunchKernelGGL(( subtractKernel11<uchar>), dim3(grid), dim3(block), 0, stream, src, rows, columns,
        channels, src_stride, value0, value1, value2, value3, dst, dst_stride);
  }

  hipError_t code = hipGetLastError();
  if (code != hipSuccess) {
    LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
RetCode subtract(const float* src, int rows, int cols, int channels,
                 int src_stride, const float* scalar, float* dst,
                 int dst_stride, hipStream_t stream) {
  // float counterpart of the uchar subtract() above. Strides are in bytes;
  // the float4 path requires 16-byte-aligned rows.
  PPL_ASSERT(src != nullptr);
  PPL_ASSERT(scalar != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));

  int columns = cols * channels;
  // Unpack the scalar; entries beyond the channel count stay 0 (unused).
  float value0 = scalar[0], value1 = 0.f, value2 = 0.f, value3 = 0.f;
  if (channels > 1) {
    value1 = scalar[1];
    value2 = scalar[2];
  }
  if (channels == 4) {
    value3 = scalar[3];
  }

  dim3 block(kBlockDimX1, kBlockDimY1);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX1, kBlockShiftX1),
            divideUp(rows, kBlockDimY1, kBlockShiftY1));

  if ((src_stride & 15) == 0 && (dst_stride & 15) == 0) {
    // Rows are 16-byte aligned: process one float4 per thread.
    cols = divideUp(columns, 4, 2);
    hipLaunchKernelGGL(( subtractKernel0<float, float4>), dim3(grid), dim3(block), 0, stream, src, rows, cols,
        channels, src_stride, value0, value1, value2, value3, dst, dst_stride);
  }
  else if (src_stride == columns * (int)sizeof(float) &&
           dst_stride == columns * (int)sizeof(float)) {
    // Fully packed image: flatten it and run a 1-D kernel.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    hipLaunchKernelGGL(( subtractKernel10<float, float4>), dim3(grid), dim3(block), 0, stream, src, columns,
        channels, value0, value1, value2, value3, dst);
  }
  else {
    // Misaligned strides: scalar kernel processing two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX1, kBlockShiftX1);
    hipLaunchKernelGGL(( subtractKernel11<float>), dim3(grid), dim3(block), 0, stream, src, rows, columns,
        channels, src_stride, value0, value1, value2, value3, dst, dst_stride);
  }

  hipError_t code = hipGetLastError();
  if (code != hipSuccess) {
    LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
template <>
RetCode Subtract<uchar, 1>(hipStream_t stream, int height, int width,
                           int inWidthStride, const uchar* inData,
                           const uchar* scalar, int outWidthStride,
                           uchar* outData) {
  // Thin dispatcher: 1-channel uchar scalar subtraction.
  return subtract(inData, height, width, 1, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
template <>
RetCode Subtract<uchar, 3>(hipStream_t stream, int height, int width,
                           int inWidthStride, const uchar* inData,
                           const uchar* scalar, int outWidthStride,
                           uchar* outData) {
  // Thin dispatcher: 3-channel uchar scalar subtraction.
  return subtract(inData, height, width, 3, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
template <>
RetCode Subtract<uchar, 4>(hipStream_t stream, int height, int width,
                           int inWidthStride, const uchar* inData,
                           const uchar* scalar, int outWidthStride,
                           uchar* outData) {
  // Thin dispatcher: 4-channel uchar scalar subtraction.
  return subtract(inData, height, width, 4, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
template <>
RetCode Subtract<float, 1>(hipStream_t stream, int height, int width,
                           int inWidthStride, const float* inData,
                           const float* scalar, int outWidthStride,
                           float* outData) {
  // Public strides are in float elements; the implementation expects bytes.
  return subtract(inData, height, width, 1, inWidthStride * (int)sizeof(float),
                  scalar, outData, outWidthStride * (int)sizeof(float),
                  stream);
}
template <>
RetCode Subtract<float, 3>(hipStream_t stream, int height, int width,
                           int inWidthStride, const float* inData,
                           const float* scalar, int outWidthStride,
                           float* outData) {
  // Public strides are in float elements; the implementation expects bytes.
  return subtract(inData, height, width, 3, inWidthStride * (int)sizeof(float),
                  scalar, outData, outWidthStride * (int)sizeof(float),
                  stream);
}
template <>
RetCode Subtract<float, 4>(hipStream_t stream, int height, int width,
                           int inWidthStride, const float* inData,
                           const float* scalar, int outWidthStride,
                           float* outData) {
  // Public strides are in float elements; the implementation expects bytes.
  return subtract(inData, height, width, 4, inWidthStride * (int)sizeof(float),
                  scalar, outData, outWidthStride * (int)sizeof(float),
                  stream);
}
/**************************** multiply operation *****************************/
// Element-wise product of two uchar images with an optional scale factor,
// four elements (one uchar4) per thread. cols is in uchar4 units; strides
// are in bytes and must be 4-byte aligned. Products are accumulated in
// float and saturated back to uchar on store.
__global__
void multiplyKernel0(const uchar* src0, int rows, int cols, int src0_stride,
                     const uchar* src1, int src1_stride, uchar* dst,
                     int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)(src0 + element_y * src0_stride);
  const uchar4* input1 = (uchar4*)(src1 + element_y * src1_stride);
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  float4 output_value;
  // Skip the extra multiply when no scaling is requested.
  if (scale == 1.f) {
    output_value.x = input_value0.x * input_value1.x;
    output_value.y = input_value0.y * input_value1.y;
    output_value.z = input_value0.z * input_value1.z;
    output_value.w = input_value0.w * input_value1.w;
  }
  else {
    output_value.x = input_value0.x * input_value1.x * scale;
    output_value.y = input_value0.y * input_value1.y * scale;
    output_value.z = input_value0.z * input_value1.z * scale;
    output_value.w = input_value0.w * input_value1.w * scale;
  }
  uchar4* output = (uchar4*)(dst + element_y * dst_stride);
  output[element_x] = saturate_cast_vector<uchar4, float4>(output_value);
}
// 1-D multiply kernel for fully packed uchar images: the image is treated
// as one contiguous array of cols uchar elements. Launched with 256-thread
// blocks (hence the << 8); each thread handles one uchar4, with guarded
// scalar stores on the final, possibly partial, vector.
__global__
void multiplyKernel10(const uchar* src0, int cols, const uchar* src1,
                      uchar* dst, float scale) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;
  // index_x is the flat element index of this thread's lane .x.
  int index_x = element_x << 2;
  if (index_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)src0;
  const uchar4* input1 = (uchar4*)src1;
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  float4 output_value;
  if (index_x < cols - 4) {
    // Interior vector: all four lanes valid, one vector store.
    if (scale == 1.f) {
      output_value.x = input_value0.x * input_value1.x;
      output_value.y = input_value0.y * input_value1.y;
      output_value.z = input_value0.z * input_value1.z;
      output_value.w = input_value0.w * input_value1.w;
    }
    else {
      output_value.x = input_value0.x * input_value1.x * scale;
      output_value.y = input_value0.y * input_value1.y * scale;
      output_value.z = input_value0.z * input_value1.z * scale;
      output_value.w = input_value0.w * input_value1.w * scale;
    }
    uchar4* output = (uchar4*)dst;
    output[element_x] = saturate_cast_vector<uchar4, float4>(output_value);
  }
  else {
    // Tail vector: compute and store only the in-range lanes.
    if (scale == 1.f) {
      output_value.x = input_value0.x * input_value1.x;
      if (index_x < cols - 1) {
        output_value.y = input_value0.y * input_value1.y;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value0.z * input_value1.z;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value0.w * input_value1.w;
      }
    }
    else {
      output_value.x = input_value0.x * input_value1.x * scale;
      if (index_x < cols - 1) {
        output_value.y = input_value0.y * input_value1.y * scale;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value0.z * input_value1.z * scale;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value0.w * input_value1.w * scale;
      }
    }
    dst[index_x] = saturate_cast(output_value.x);
    if (index_x < cols - 1) {
      dst[index_x + 1] = saturate_cast(output_value.y);
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = saturate_cast(output_value.z);
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = saturate_cast(output_value.w);
    }
  }
}
// Fallback multiply kernel for strides that do not allow vector loads:
// each thread processes two scalar T elements. cols is the row width in
// elements; strides are in bytes. Intermediate products are computed in
// float and saturated on store for 1-byte types.
template <typename T>
__global__
void multiplyKernel11(const T* src0, int rows, int cols, int src0_stride,
                      const T* src1, int src1_stride, T* dst, int dst_stride,
                      float scale) {
  // Each thread covers elements element_x and element_x + 1.
  int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T* input0 = (T*)((uchar*)src0 + element_y * src0_stride);
  const T* input1 = (T*)((uchar*)src1 + element_y * src1_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value00, input_value01;
  T input_value10, input_value11;
  float output_value0, output_value1;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior blocks: both elements are guaranteed in range.
    input_value00 = input0[element_x];
    input_value01 = input0[element_x + 1];
    input_value10 = input1[element_x];
    input_value11 = input1[element_x + 1];
    if (scale == 1.f) {
      output_value0 = input_value00 * input_value10;
      output_value1 = input_value01 * input_value11;
    }
    else {
      output_value0 = input_value00 * input_value10 * scale;
      output_value1 = input_value01 * input_value11 * scale;
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      output[element_x + 1] = saturate_cast(output_value1);
    }
    else {
      output[element_x] = output_value0;
      output[element_x + 1] = output_value1;
    }
  }
  else {
    // Last block along x: the second element may fall off the row end, so
    // every access to element_x + 1 is guarded.
    input_value00 = input0[element_x];
    if (element_x != cols - 1) {
      input_value01 = input0[element_x + 1];
    }
    input_value10 = input1[element_x];
    if (element_x != cols - 1) {
      input_value11 = input1[element_x + 1];
    }
    if (scale == 1.f) {
      output_value0 = input_value00 * input_value10;
      if (element_x != cols - 1) {
        output_value1 = input_value01 * input_value11;
      }
    }
    else {
      output_value0 = input_value00 * input_value10 * scale;
      if (element_x != cols - 1) {
        output_value1 = input_value01 * input_value11 * scale;
      }
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      if (element_x != cols - 1) {
        output[element_x + 1] = saturate_cast(output_value1);
      }
    }
    else {
      output[element_x] = output_value0;
      if (element_x != cols - 1) {
        output[element_x + 1] = output_value1;
      }
    }
  }
}
// float overload of multiplyKernel0: element-wise product of two float
// images, two elements (one float2) per thread. cols is in float2 units;
// strides are in bytes and must be 8-byte aligned.
__global__
void multiplyKernel0(const float* src0, int rows, int cols, int src0_stride,
                     const float* src1, int src1_stride, float* dst,
                     int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const float2* input0 = (float2*)((uchar*)src0 + element_y * src0_stride);
  const float2* input1 = (float2*)((uchar*)src1 + element_y * src1_stride);
  float2 input_value0 = input0[element_x];
  float2 input_value1 = input1[element_x];
  float2 output_value;
  // Skip the extra multiply when no scaling is requested.
  if (scale == 1.f) {
    output_value.x = input_value0.x * input_value1.x;
    output_value.y = input_value0.y * input_value1.y;
  }
  else {
    output_value.x = input_value0.x * input_value1.x * scale;
    output_value.y = input_value0.y * input_value1.y * scale;
  }
  float2* output = (float2*)((uchar*)dst + element_y * dst_stride);
  output[element_x] = output_value;
}
RetCode multiply(const uchar* src0, int rows, int cols, int channels,
                 int src0_stride, const uchar* src1, int src1_stride,
                 uchar* dst, int dst_stride, float scale, hipStream_t stream) {
  // Element-wise product of two uchar images, scaled by `scale`. Strides are
  // in bytes. Dispatches to the widest kernel the strides allow: uchar4 2-D,
  // uchar4 flattened 1-D, or a scalar 2-per-thread fallback.
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  if ((src0_stride & 3) == 0 && (src1_stride & 3) == 0 &&
      (dst_stride & 3) == 0) {
    // Rows are 4-byte aligned: process one uchar4 per thread.
    cols = divideUp(columns, 4, 2);
    hipLaunchKernelGGL(( multiplyKernel0), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else if (src0_stride == columns && src1_stride == columns &&
           dst_stride == columns) {
    // Fully packed images: flatten them and run a 1-D kernel.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    hipLaunchKernelGGL(( multiplyKernel10), dim3(grid), dim3(block), 0, stream, src0, columns, src1, dst,
        scale);
  }
  else {
    // Misaligned strides: scalar kernel processing two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
    hipLaunchKernelGGL(( multiplyKernel11<uchar>), dim3(grid), dim3(block), 0, stream, src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    LOG(ERROR) << "CUDA error: " << hipGetErrorString(err);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
RetCode multiply(const float* src0, int rows, int cols, int channels,
                 int src0_stride, const float* src1, int src1_stride,
                 float* dst, int dst_stride, float scale, hipStream_t stream) {
  // float counterpart of the uchar multiply() above. Strides are in bytes;
  // the float2 path requires 8-byte-aligned rows.
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
      (dst_stride & 7) == 0) {
    // Rows are 8-byte aligned: process one float2 per thread.
    cols = divideUp(columns, 2, 1);
    hipLaunchKernelGGL(( multiplyKernel0), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else {
    // Misaligned strides: scalar kernel processing two elements per thread.
    hipLaunchKernelGGL(( multiplyKernel11<float>), dim3(grid), dim3(block), 0, stream, src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    LOG(ERROR) << "CUDA error: " << hipGetErrorString(err);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
template <>
RetCode Mul<uchar, 1>(hipStream_t stream, int height, int width,
                      int inWidthStride0, const uchar* inData0,
                      int inWidthStride1, const uchar* inData1,
                      int outWidthStride, uchar* outData, float scale) {
  // Thin dispatcher: 1-channel uchar element-wise multiply.
  return multiply(inData0, height, width, 1, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
template <>
RetCode Mul<uchar, 3>(hipStream_t stream, int height, int width,
                      int inWidthStride0, const uchar* inData0,
                      int inWidthStride1, const uchar* inData1,
                      int outWidthStride, uchar* outData, float scale) {
  // Thin dispatcher: 3-channel uchar element-wise multiply.
  return multiply(inData0, height, width, 3, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
template <>
RetCode Mul<uchar, 4>(hipStream_t stream, int height, int width,
                      int inWidthStride0, const uchar* inData0,
                      int inWidthStride1, const uchar* inData1,
                      int outWidthStride, uchar* outData, float scale) {
  // Thin dispatcher: 4-channel uchar element-wise multiply.
  return multiply(inData0, height, width, 4, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
template <>
RetCode Mul<float, 1>(hipStream_t stream, int height, int width,
                      int inWidthStride0, const float* inData0,
                      int inWidthStride1, const float* inData1,
                      int outWidthStride, float* outData, float scale) {
  // Public strides are in float elements; the implementation expects bytes.
  return multiply(inData0, height, width, 1,
                  inWidthStride0 * (int)sizeof(float), inData1,
                  inWidthStride1 * (int)sizeof(float), outData,
                  outWidthStride * (int)sizeof(float), scale, stream);
}
template <>
RetCode Mul<float, 3>(hipStream_t stream, int height, int width,
                      int inWidthStride0, const float* inData0,
                      int inWidthStride1, const float* inData1,
                      int outWidthStride, float* outData, float scale) {
  // Public strides are in float elements; the implementation expects bytes.
  return multiply(inData0, height, width, 3,
                  inWidthStride0 * (int)sizeof(float), inData1,
                  inWidthStride1 * (int)sizeof(float), outData,
                  outWidthStride * (int)sizeof(float), scale, stream);
}
template <>
RetCode Mul<float, 4>(hipStream_t stream, int height, int width,
                      int inWidthStride0, const float* inData0,
                      int inWidthStride1, const float* inData1,
                      int outWidthStride, float* outData, float scale) {
  // Public strides are in float elements; the implementation expects bytes.
  return multiply(inData0, height, width, 4,
                  inWidthStride0 * (int)sizeof(float), inData1,
                  inWidthStride1 * (int)sizeof(float), outData,
                  outWidthStride * (int)sizeof(float), scale, stream);
}
/***************************** divide operation ******************************/
// Element-wise quotient of two uchar images with an optional scale factor,
// four elements (one uchar4) per thread. cols is in uchar4 units; strides
// are in bytes and must be 4-byte aligned. Division by zero yields 0.
// Note: the uchar operands divide as integers before the float conversion.
__global__
void divideKernel0(const uchar* src0, int rows, int cols, int src0_stride,
                   const uchar* src1, int src1_stride, uchar* dst,
                   int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)(src0 + element_y * src0_stride);
  const uchar4* input1 = (uchar4*)(src1 + element_y * src1_stride);
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  float4 output_value;
  // Zero denominators map to 0 instead of faulting.
  if (scale == 1.f) {
    output_value.x = input_value1.x == 0 ? 0 : input_value0.x / input_value1.x;
    output_value.y = input_value1.y == 0 ? 0 : input_value0.y / input_value1.y;
    output_value.z = input_value1.z == 0 ? 0 : input_value0.z / input_value1.z;
    output_value.w = input_value1.w == 0 ? 0 : input_value0.w / input_value1.w;
  }
  else {
    output_value.x = input_value1.x == 0 ? 0 :
                     scale * input_value0.x / input_value1.x;
    output_value.y = input_value1.y == 0 ? 0 :
                     scale * input_value0.y / input_value1.y;
    output_value.z = input_value1.z == 0 ? 0 :
                     scale * input_value0.z / input_value1.z;
    output_value.w = input_value1.w == 0 ? 0 :
                     scale * input_value0.w / input_value1.w;
  }
  uchar4* output = (uchar4*)(dst + element_y * dst_stride);
  output[element_x] = saturate_cast_vector<uchar4, float4>(output_value);
}
// 1-D divide kernel for fully packed uchar images: the image is treated as
// one contiguous array of cols uchar elements. Launched with 256-thread
// blocks (hence the << 8); each thread handles one uchar4, with guarded
// scalar stores on the final, possibly partial, vector. Division by zero
// yields 0.
__global__
void divideKernel10(const uchar* src0, int cols, const uchar* src1, uchar* dst,
                    float scale) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;
  // index_x is the flat element index of this thread's lane .x.
  int index_x = element_x << 2;
  if (index_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)src0;
  const uchar4* input1 = (uchar4*)src1;
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  float4 output_value;
  if (index_x < cols - 4) {
    // Interior vector: all four lanes valid, one vector store.
    if (scale == 1.f) {
      output_value.x = input_value1.x == 0 ? 0 :
                       input_value0.x / input_value1.x;
      output_value.y = input_value1.y == 0 ? 0 :
                       input_value0.y / input_value1.y;
      output_value.z = input_value1.z == 0 ? 0 :
                       input_value0.z / input_value1.z;
      output_value.w = input_value1.w == 0 ? 0 :
                       input_value0.w / input_value1.w;
    }
    else {
      output_value.x = input_value1.x == 0 ? 0 :
                       scale * input_value0.x / input_value1.x;
      output_value.y = input_value1.y == 0 ? 0 :
                       scale * input_value0.y / input_value1.y;
      output_value.z = input_value1.z == 0 ? 0 :
                       scale * input_value0.z / input_value1.z;
      output_value.w = input_value1.w == 0 ? 0 :
                       scale * input_value0.w / input_value1.w;
    }
    uchar4* output = (uchar4*)dst;
    output[element_x] = saturate_cast_vector<uchar4, float4>(output_value);
  }
  else {
    // Tail vector: compute and store only the in-range lanes.
    if (scale == 1.f) {
      output_value.x = input_value1.x == 0 ? 0 :
                       input_value0.x / input_value1.x;
      if (index_x < cols - 1) {
        output_value.y = input_value1.y == 0 ? 0 :
                         input_value0.y / input_value1.y;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value1.z == 0 ? 0 :
                         input_value0.z / input_value1.z;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value1.w == 0 ? 0 :
                         input_value0.w / input_value1.w;
      }
    }
    else {
      output_value.x = input_value1.x == 0 ? 0 :
                       scale * input_value0.x / input_value1.x;
      if (index_x < cols - 1) {
        output_value.y = input_value1.y == 0 ? 0 :
                         scale * input_value0.y / input_value1.y;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value1.z == 0 ? 0 :
                         scale * input_value0.z / input_value1.z;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value1.w == 0 ? 0 :
                         scale * input_value0.w / input_value1.w;
      }
    }
    dst[index_x] = saturate_cast(output_value.x);
    if (index_x < cols - 1) {
      dst[index_x + 1] = saturate_cast(output_value.y);
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = saturate_cast(output_value.z);
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = saturate_cast(output_value.w);
    }
  }
}
// Fallback divide kernel for strides that do not allow vector loads: each
// thread processes two scalar T elements. cols is the row width in
// elements; strides are in bytes. Division by zero yields 0; 1-byte types
// saturate on store.
template <typename T>
__global__
void divideKernel11(const T* src0, int rows, int cols, int src0_stride,
                    const T* src1, int src1_stride, T* dst, int dst_stride,
                    float scale) {
  // Each thread covers elements element_x and element_x + 1.
  int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T* input0 = (T*)((uchar*)src0 + element_y * src0_stride);
  const T* input1 = (T*)((uchar*)src1 + element_y * src1_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value00, input_value01;
  T input_value10, input_value11;
  float output_value0, output_value1;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior blocks: both elements are guaranteed in range.
    input_value00 = input0[element_x];
    input_value01 = input0[element_x + 1];
    input_value10 = input1[element_x];
    input_value11 = input1[element_x + 1];
    if (scale == 1.f) {
      output_value0 = input_value10 == 0 ? 0 : input_value00 / input_value10;
      output_value1 = input_value11 == 0 ? 0 : input_value01 / input_value11;
    }
    else {
      output_value0 = input_value10 == 0 ? 0 :
                      scale * input_value00 / input_value10;
      output_value1 = input_value11 == 0 ? 0 :
                      scale * input_value01 / input_value11;
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      output[element_x + 1] = saturate_cast(output_value1);
    }
    else {
      output[element_x] = output_value0;
      output[element_x + 1] = output_value1;
    }
  }
  else {
    // Last block along x: the second element may fall off the row end, so
    // every access to element_x + 1 is guarded.
    input_value00 = input0[element_x];
    if (element_x != cols - 1) {
      input_value01 = input0[element_x + 1];
    }
    input_value10 = input1[element_x];
    if (element_x != cols - 1) {
      input_value11 = input1[element_x + 1];
    }
    if (scale == 1.f) {
      output_value0 = input_value10 == 0 ? 0 : input_value00 / input_value10;
      if (element_x != cols - 1) {
        output_value1 = input_value11 == 0 ? 0 : input_value01 / input_value11;
      }
    }
    else {
      output_value0 = input_value10 == 0 ? 0 :
                      scale * input_value00 / input_value10;
      if (element_x != cols - 1) {
        output_value1 = input_value11 == 0 ? 0 :
                        scale * input_value01 / input_value11;
      }
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      if (element_x != cols - 1) {
        output[element_x + 1] = saturate_cast(output_value1);
      }
    }
    else {
      output[element_x] = output_value0;
      if (element_x != cols - 1) {
        output[element_x + 1] = output_value1;
      }
    }
  }
}
// float overload of divideKernel0: element-wise quotient of two float
// images, two elements (one float2) per thread. cols is in float2 units;
// strides are in bytes and must be 8-byte aligned. Division by zero
// yields 0.
__global__
void divideKernel0(const float* src0, int rows, int cols, int src0_stride,
                   const float* src1, int src1_stride, float* dst,
                   int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const float2* input0 = (float2*)((uchar*)src0 + element_y * src0_stride);
  const float2* input1 = (float2*)((uchar*)src1 + element_y * src1_stride);
  float2 input_value0 = input0[element_x];
  float2 input_value1 = input1[element_x];
  float2 output_value;
  // Zero denominators map to 0 instead of producing inf/nan.
  if (scale == 1.f) {
    output_value.x = input_value1.x == 0 ? 0 : input_value0.x / input_value1.x;
    output_value.y = input_value1.y == 0 ? 0 : input_value0.y / input_value1.y;
  }
  else {
    output_value.x = input_value1.x == 0 ? 0 :
                     scale * input_value0.x / input_value1.x;
    output_value.y = input_value1.y == 0 ? 0 :
                     scale * input_value0.y / input_value1.y;
  }
  float2* output = (float2*)((uchar*)dst + element_y * dst_stride);
  output[element_x] = output_value;
}
RetCode divide(const uchar* src0, int rows, int cols, int channels,
               int src0_stride, const uchar* src1, int src1_stride,
               uchar* dst, int dst_stride, float scale, hipStream_t stream) {
  // Element-wise quotient of two uchar images, scaled by `scale`. Strides
  // are in bytes. Dispatches to the widest kernel the strides allow: uchar4
  // 2-D, uchar4 flattened 1-D, or a scalar 2-per-thread fallback.
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  if ((src0_stride & 3) == 0 && (src1_stride & 3) == 0 &&
      (dst_stride & 3) == 0) {
    // Rows are 4-byte aligned: process one uchar4 per thread.
    cols = divideUp(columns, 4, 2);
    hipLaunchKernelGGL(( divideKernel0), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else if (src0_stride == columns && src1_stride == columns &&
           dst_stride == columns) {
    // Fully packed images: flatten them and run a 1-D kernel.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    hipLaunchKernelGGL(( divideKernel10), dim3(grid), dim3(block), 0, stream, src0, columns, src1, dst,
        scale);
  }
  else {
    // Misaligned strides: scalar kernel processing two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
    hipLaunchKernelGGL(( divideKernel11<uchar>), dim3(grid), dim3(block), 0, stream, src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    LOG(ERROR) << "CUDA error: " << hipGetErrorString(err);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Host launcher for float element-wise division: dst = scale*src0/src1,
// with x/0 defined as 0.  Strides are in bytes.  Uses the float2-vectorized
// kernel when all rows are 8-byte aligned, otherwise a scalar fallback.
RetCode divide(const float* src0, int rows, int cols, int channels,
               int src0_stride, const float* src1, int src1_stride,
               float* dst, int dst_stride, float scale, hipStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
  // Total float elements per row.
  int columns = cols * channels;
  dim3 block, grid;
  block.x = kBlockDimX0;
  block.y = kBlockDimY0;
  // Both kernels process 2 floats per thread.
  grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
  grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
  if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
      (dst_stride & 7) == 0) {
    // 8-byte-aligned rows: float2 path; cols is in float2 units.
    cols = divideUp(columns, 2, 1);
    hipLaunchKernelGGL(( divideKernel0), dim3(grid), dim3(block), 0, stream, src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else {
    hipLaunchKernelGGL(( divideKernel11<float>), dim3(grid), dim3(block), 0, stream, src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }
  hipError_t code = hipGetLastError();
  if (code != hipSuccess) {
    LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Div<uchar, 1>: element-wise division of single-channel uchar images.
// Thin adapter over the shared uchar divide(); uchar strides are already in
// bytes, so they are forwarded unchanged.
template <>
RetCode Div<uchar, 1>(hipStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return divide(inData0, height, width, 1, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Div<uchar, 3>: element-wise division of 3-channel uchar images.
// Thin adapter over the shared uchar divide(); uchar strides are already in
// bytes, so they are forwarded unchanged.
template <>
RetCode Div<uchar, 3>(hipStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return divide(inData0, height, width, 3, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Div<uchar, 4>: element-wise division of 4-channel uchar images.
// Thin adapter over the shared uchar divide(); uchar strides are already in
// bytes, so they are forwarded unchanged.
template <>
RetCode Div<uchar, 4>(hipStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return divide(inData0, height, width, 4, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Div<float, 1>: element-wise division of single-channel float images.
// Width strides arrive in float elements; the low-level divide() expects
// byte strides, so they are scaled by sizeof(float) here.
template <>
RetCode Div<float, 1>(hipStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return divide(inData0, height, width, 1, stride0, inData1, stride1,
                outData, stride_out, scale, stream);
}
// Div<float, 3>: element-wise division of 3-channel float images.
// Width strides arrive in float elements; the low-level divide() expects
// byte strides, so they are scaled by sizeof(float) here.
template <>
RetCode Div<float, 3>(hipStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return divide(inData0, height, width, 3, stride0, inData1, stride1,
                outData, stride_out, scale, stream);
}
// Div<float, 4>: element-wise division of 4-channel float images.
// Width strides arrive in float elements; the low-level divide() expects
// byte strides, so they are scaled by sizeof(float) here.
template <>
RetCode Div<float, 4>(hipStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return divide(inData0, height, width, 4, stride0, inData1, stride1,
                outData, stride_out, scale, stream);
}
} // namespace cuda
} // namespace cv
} // namespace ppl
| 17ba59ceb4e0ed575e15ccdaa5ba6d641edfa85d.cu | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/arithmetic.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
/******************************* add operation *******************************/
// Saturated addition of two uchar images, one uchar4 (4 bytes) per thread.
// `cols` is in uchar4 units; strides are in bytes.  The caller only takes
// this path when every stride is 4-byte aligned.
__global__
void addKernel0(const uchar* src0, int rows, int cols, int src0_stride,
                const uchar* src1, int src1_stride, uchar* dst,
                int dst_stride) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)(src0 + element_y * src0_stride);
  const uchar4* input1 = (uchar4*)(src1 + element_y * src1_stride);
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  uchar4 output_value;
  // Sums are formed in int (the cast promotes the whole expression),
  // then clamped back to uchar range by saturate_cast.
  output_value.x = saturate_cast((int)input_value0.x + input_value1.x);
  output_value.y = saturate_cast((int)input_value0.y + input_value1.y);
  output_value.z = saturate_cast((int)input_value0.z + input_value1.z);
  output_value.w = saturate_cast((int)input_value0.w + input_value1.w);
  uchar4* output = (uchar4*)(dst + element_y * dst_stride);
  output[element_x] = output_value;
}
// Saturated addition for fully contiguous uchar data treated as one flat
// array of `cols` bytes.  One uchar4 per thread; 1-D launch with a fixed
// 256-thread block (element_x = blockIdx.x * 256 + threadIdx.x).
// NOTE(review): the tail iteration still performs a full uchar4 load, which
// reads up to 3 bytes past `cols` when cols is not a multiple of 4 —
// presumably within the allocation; confirm against callers' buffer sizes.
__global__
void addKernel10(const uchar* src0, int cols, const uchar* src1, uchar* dst) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;
  int index_x = element_x << 2;  // index of the first of this thread's 4 bytes
  if (index_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)src0;
  const uchar4* input1 = (uchar4*)src1;
  uchar4 input_value0, input_value1, output_value;
  input_value0 = input0[element_x];
  input_value1 = input1[element_x];
  if (index_x < cols - 4) {
    // Fast path: all 4 lanes are in range; one vector store.
    output_value.x = saturate_cast((int)input_value0.x + input_value1.x);
    output_value.y = saturate_cast((int)input_value0.y + input_value1.y);
    output_value.z = saturate_cast((int)input_value0.z + input_value1.z);
    output_value.w = saturate_cast((int)input_value0.w + input_value1.w);
    uchar4* output = (uchar4*)dst;
    output[element_x] = output_value;
  }
  else {
    // Tail: compute and store only the lanes that fall inside `cols`.
    output_value.x = saturate_cast((int)input_value0.x + input_value1.x);
    if (index_x < cols - 1) {
      output_value.y = saturate_cast((int)input_value0.y + input_value1.y);
    }
    if (index_x < cols - 2) {
      output_value.z = saturate_cast((int)input_value0.z + input_value1.z);
    }
    if (index_x < cols - 3) {
      output_value.w = saturate_cast((int)input_value0.w + input_value1.w);
    }
    dst[index_x] = output_value.x;
    if (index_x < cols - 1) {
      dst[index_x + 1] = output_value.y;
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = output_value.z;
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = output_value.w;
    }
  }
}
// Saturated addition for strided uchar images without alignment guarantees:
// 4 scalar bytes per thread.  `cols` is the total uchar count per row;
// strides are in bytes.  Blocks before the last in x take an unchecked fast
// path; the last block bounds-checks every lane against the row tail.
__global__
void addKernel11(const uchar* src0, int rows, int cols, int src0_stride,
                 const uchar* src1, int src1_stride, uchar* dst,
                 int dst_stride) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  int index_x = element_x << 2;  // first of this thread's 4 elements
  if (element_y >= rows || index_x >= cols) {
    return;
  }
  const uchar* input0 = src0 + element_y * src0_stride;
  const uchar* input1 = src1 + element_y * src1_stride;
  uchar* output = dst + element_y * dst_stride;
  uchar input_value00, input_value01, input_value02, input_value03;
  uchar input_value10, input_value11, input_value12, input_value13;
  int output_value0, output_value1, output_value2, output_value3;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior block: all 4 elements are guaranteed in range.
    input_value00 = input0[index_x];
    input_value01 = input0[index_x + 1];
    input_value02 = input0[index_x + 2];
    input_value03 = input0[index_x + 3];
    input_value10 = input1[index_x];
    input_value11 = input1[index_x + 1];
    input_value12 = input1[index_x + 2];
    input_value13 = input1[index_x + 3];
    output_value0 = input_value00 + input_value10;
    output_value1 = input_value01 + input_value11;
    output_value2 = input_value02 + input_value12;
    output_value3 = input_value03 + input_value13;
    output[index_x] = saturate_cast(output_value0);
    output[index_x + 1] = saturate_cast(output_value1);
    output[index_x + 2] = saturate_cast(output_value2);
    output[index_x + 3] = saturate_cast(output_value3);
  }
  else {
    // Last block in x: guard each of the 4 lanes against the row end.
    input_value00 = input0[index_x];
    if (index_x < cols - 1) {
      input_value01 = input0[index_x + 1];
    }
    if (index_x < cols - 2) {
      input_value02 = input0[index_x + 2];
    }
    if (index_x < cols - 3) {
      input_value03 = input0[index_x + 3];
    }
    input_value10 = input1[index_x];
    if (index_x < cols - 1) {
      input_value11 = input1[index_x + 1];
    }
    if (index_x < cols - 2) {
      input_value12 = input1[index_x + 2];
    }
    if (index_x < cols - 3) {
      input_value13 = input1[index_x + 3];
    }
    output_value0 = input_value00 + input_value10;
    if (index_x < cols - 1) {
      output_value1 = input_value01 + input_value11;
    }
    if (index_x < cols - 2) {
      output_value2 = input_value02 + input_value12;
    }
    if (index_x < cols - 3) {
      output_value3 = input_value03 + input_value13;
    }
    output[index_x] = saturate_cast(output_value0);
    if (index_x < cols - 1) {
      output[index_x + 1] = saturate_cast(output_value1);
    }
    if (index_x < cols - 2) {
      output[index_x + 2] = saturate_cast(output_value2);
    }
    if (index_x < cols - 3) {
      output[index_x + 3] = saturate_cast(output_value3);
    }
  }
}
// Addition of two float images, one float2 (2 floats) per thread.
// `cols` is in float2 units; strides are in bytes (applied via uchar
// pointer arithmetic).  Requires 8-byte-aligned rows (checked by caller).
__global__
void addKernel0(const float* src0, int rows, int cols, int src0_stride,
                const float* src1, int src1_stride, float* dst,
                int dst_stride) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const float2* input0 = (float2*)((uchar*)src0 + element_y * src0_stride);
  const float2* input1 = (float2*)((uchar*)src1 + element_y * src1_stride);
  float2 input_value0 = input0[element_x];
  float2 input_value1 = input1[element_x];
  float2 output_value;
  output_value.x = input_value0.x + input_value1.x;
  output_value.y = input_value0.y + input_value1.y;
  float2* output = (float2*)((uchar*)dst + element_y * dst_stride);
  output[element_x] = output_value;
}
// Addition of two float images without alignment guarantees: 2 scalar floats
// per thread.  `cols` is the total float count per row; strides in bytes.
// The last block in x guards the second lane against an odd row length.
__global__
void addKernel1(const float* src0, int rows, int cols, int src0_stride,
                const float* src1, int src1_stride, float* dst,
                int dst_stride) {
  int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const float* input0 = (float*)((uchar*)src0 + element_y * src0_stride);
  const float* input1 = (float*)((uchar*)src1 + element_y * src1_stride);
  float* output = (float*)((uchar*)dst + element_y * dst_stride);
  float input_value00, input_value01;
  float input_value10, input_value11;
  float output_value0, output_value1;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior block: both elements guaranteed in range.
    input_value00 = input0[element_x];
    input_value01 = input0[element_x + 1];
    input_value10 = input1[element_x];
    input_value11 = input1[element_x + 1];
    output_value0 = input_value00 + input_value10;
    output_value1 = input_value01 + input_value11;
    output[element_x] = output_value0;
    output[element_x + 1] = output_value1;
  }
  else {
    // Last block: the second element may fall past the row end.
    input_value00 = input0[element_x];
    if (element_x != cols - 1) {
      input_value01 = input0[element_x + 1];
    }
    input_value10 = input1[element_x];
    if (element_x != cols - 1) {
      input_value11 = input1[element_x + 1];
    }
    output_value0 = input_value00 + input_value10;
    if (element_x != cols - 1) {
      output_value1 = input_value01 + input_value11;
    }
    output[element_x] = output_value0;
    if (element_x != cols - 1) {
      output[element_x + 1] = output_value1;
    }
  }
}
// Host launcher for uchar saturated addition: dst = saturate(src0 + src1).
// Strides are in bytes (== elements for uchar).  Picks one of three kernels:
// 4-byte-vectorized (aligned rows), flat 1-D (fully contiguous images), or a
// scalar strided fallback.
RetCode add(const uchar* src0, int rows, int cols, int channels,
            int src0_stride, const uchar* src1, int src1_stride, uchar* dst,
            int dst_stride, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
  // Total uchar elements per row; `cols` becomes the 4-per-thread count.
  int columns = cols * channels;
  cols = divideUp(columns, 4, 2);
  dim3 block, grid;
  block.x = kBlockDimX0;
  block.y = kBlockDimY0;
  grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0);
  grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
  if ((src0_stride & 3) == 0 && (src1_stride & 3) == 0 &&
      (dst_stride & 3) == 0) {
    // All rows 4-byte aligned: uchar4 path; cols is in vectors.
    addKernel0<<<grid, block, 0, stream>>>(src0, rows, cols, src0_stride, src1,
                                           src1_stride, dst, dst_stride);
  }
  else if (src0_stride == columns && src1_stride == columns &&
           dst_stride == columns ) {
    // No row padding anywhere: treat the whole image as one flat array.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block.x = 256;
    block.y = 1;
    grid.x = divideUp(cols, 256, 8);
    grid.y = 1;
    addKernel10<<<grid, block, 0, stream>>>(src0, columns, src1, dst);
  }
  else {
    // BUG FIX: addKernel11 expects the total per-row element count
    // (its guard is `index_x >= cols` with index_x = element_x << 2).
    // Passing the pre-divided `cols` here made the kernel process only the
    // first quarter of each row; pass `columns`, matching divide()'s
    // equivalent fallback path.
    addKernel11<<<grid, block, 0, stream>>>(src0, rows, columns, src0_stride,
                                            src1, src1_stride, dst,
                                            dst_stride);
  }
  cudaError_t code = cudaGetLastError();
  if (code != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Host launcher for float addition: dst = src0 + src1.  Strides are in
// bytes.  Uses the float2-vectorized kernel when all rows are 8-byte
// aligned, otherwise the scalar 2-per-thread fallback.
RetCode add(const float* src0, int rows, int cols, int channels,
            int src0_stride, const float* src1, int src1_stride, float* dst,
            int dst_stride, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
  // Total float elements per row.
  int columns = cols * channels;
  dim3 block, grid;
  block.x = kBlockDimX0;
  block.y = kBlockDimY0;
  // Both kernels process 2 floats per thread.
  grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
  grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
  if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
      (dst_stride & 7) == 0) {
    // 8-byte-aligned rows: float2 path; cols is in float2 units.
    cols = divideUp(columns, 2, 1);
    addKernel0<<<grid, block, 0, stream>>>(src0, rows, cols, src0_stride,
                                           src1, src1_stride, dst, dst_stride);
  }
  else {
    addKernel1<<<grid, block, 0, stream>>>(src0, rows, columns, src0_stride,
                                           src1, src1_stride, dst, dst_stride);
  }
  cudaError_t code = cudaGetLastError();
  if (code != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Add<uchar, 1>: saturated addition of single-channel uchar images.
// Thin adapter over the shared uchar add(); uchar strides are already in
// bytes, so they are forwarded unchanged.
template <>
RetCode Add<uchar, 1>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData) {
  return add(inData0, height, width, 1, inWidthStride0, inData1,
             inWidthStride1, outData, outWidthStride, stream);
}
// Add<uchar, 3>: saturated addition of 3-channel uchar images.
// Thin adapter over the shared uchar add(); uchar strides are already in
// bytes, so they are forwarded unchanged.
template <>
RetCode Add<uchar, 3>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData) {
  return add(inData0, height, width, 3, inWidthStride0, inData1,
             inWidthStride1, outData, outWidthStride, stream);
}
// Add<uchar, 4>: saturated addition of 4-channel uchar images.
// Thin adapter over the shared uchar add(); uchar strides are already in
// bytes, so they are forwarded unchanged.
template <>
RetCode Add<uchar, 4>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData) {
  return add(inData0, height, width, 4, inWidthStride0, inData1,
             inWidthStride1, outData, outWidthStride, stream);
}
// Add<float, 1>: addition of single-channel float images.
// Width strides arrive in float elements; add() expects byte strides,
// so they are scaled by sizeof(float) here.
template <>
RetCode Add<float, 1>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return add(inData0, height, width, 1, stride0, inData1, stride1,
             outData, stride_out, stream);
}
// Add<float, 3>: addition of 3-channel float images.
// Width strides arrive in float elements; add() expects byte strides,
// so they are scaled by sizeof(float) here.
template <>
RetCode Add<float, 3>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return add(inData0, height, width, 3, stride0, inData1, stride1,
             outData, stride_out, stream);
}
// Add<float, 4>: addition of 4-channel float images.
// Width strides arrive in float elements; add() expects byte strides,
// so they are scaled by sizeof(float) here.
template <>
RetCode Add<float, 4>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return add(inData0, height, width, 4, stride0, inData1, stride1,
             outData, stride_out, stream);
}
/*************************** addWeighted operation ***************************/
// Weighted sum, vectorized: dst = alpha*src0 + beta*src1 + gamma, one
// 2-element vector (T1 = uchar2 or float2) per thread.  T0 is the scalar
// element type, T1 its 2-wide vector type; `cols` is in T1 units and
// strides are in bytes.  Accumulation is done in float2; saturate_cast_vector
// converts back to T1 (clamping for uchar2).
template <typename T0, typename T1>
__global__
void addWeightedKernel0(const T0* src0, int rows, int cols, int src0_stride,
                        float alpha, const T0* src1, int src1_stride,
                        float beta, float gamma, T0* dst, int dst_stride) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T1* input0 = (T1*)((uchar*)src0 + element_y * src0_stride);
  const T1* input1 = (T1*)((uchar*)src1 + element_y * src1_stride);
  T1* output = (T1*)((uchar*)dst + element_y * dst_stride);
  T1 input_value0 = input0[element_x];
  T1 input_value1 = input1[element_x];
  // Start both lanes at gamma, then accumulate the weighted inputs.
  float2 output_value0 = make_float2(gamma, gamma);
  output_value0.x += input_value0.x * alpha;
  output_value0.y += input_value0.y * alpha;
  output_value0.x += input_value1.x * beta;
  output_value0.y += input_value1.y * beta;
  output[element_x] = saturate_cast_vector<T1, float2>(output_value0);
}
// Weighted sum, scalar fallback: dst = alpha*src0 + beta*src1 + gamma,
// 2 scalar elements per thread.  `cols` is the total element count per row;
// strides in bytes.  The last block in x guards the second element; for
// 1-byte T the result is saturate_cast'ed, otherwise stored directly.
template <typename T>
__global__
void addWeightedKernel1(const T* src0, int rows, int cols, int src0_stride,
                        float alpha, const T* src1, int src1_stride,
                        float beta, float gamma, T* dst, int dst_stride) {
  int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T* input0 = (T*)((uchar*)src0 + element_y * src0_stride);
  const T* input1 = (T*)((uchar*)src1 + element_y * src1_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value00, input_value01;
  T input_value10, input_value11;
  // Accumulate in float, seeded with gamma.
  float output_value0 = gamma;
  float output_value1 = gamma;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior block: both elements guaranteed in range.
    input_value00 = input0[element_x];
    input_value01 = input0[element_x + 1];
    input_value10 = input1[element_x];
    input_value11 = input1[element_x + 1];
    output_value0 += input_value00 * alpha;
    output_value1 += input_value01 * alpha;
    output_value0 += input_value10 * beta;
    output_value1 += input_value11 * beta;
    output[element_x] = output_value0;
    output[element_x + 1] = output_value1;
  }
  else {
    // Last block: the second element may fall past the row end.
    input_value00 = input0[element_x];
    if (element_x != cols - 1) {
      input_value01 = input0[element_x + 1];
    }
    input_value10 = input1[element_x];
    if (element_x != cols - 1) {
      input_value11 = input1[element_x + 1];
    }
    output_value0 += input_value00 * alpha;
    output_value0 += input_value10 * beta;
    if (element_x != cols - 1) {
      output_value1 += input_value01 * alpha;
      output_value1 += input_value11 * beta;
    }
    if (sizeof(T) == 1) {
      // uchar output: clamp the float accumulators to [0, 255].
      output[element_x] = saturate_cast(output_value0);
      if (element_x != cols - 1) {
        output[element_x + 1] = saturate_cast(output_value1);
      }
    }
    else {
      output[element_x] = output_value0;
      if (element_x != cols - 1) {
        output[element_x + 1] = output_value1;
      }
    }
  }
}
// Host launcher for uchar weighted sum: dst = saturate(alpha*src0 +
// beta*src1 + gamma).  Strides are in bytes.  Uses the uchar2-vectorized
// kernel when all strides are 2-byte aligned, otherwise the scalar fallback.
RetCode addWeighted(const uchar* src0, int rows, int cols, int channels,
                    int src0_stride, float alpha, const uchar* src1,
                    int src1_stride, float beta, float gamma, uchar* dst,
                    int dst_stride, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
  // Total uchar elements per row; both kernels process 2 per thread.
  int columns = cols * channels;
  dim3 block, grid;
  block.x = kBlockDimX0;
  block.y = kBlockDimY0;
  grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
  grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
  if ((src0_stride & 1) == 0 && (src1_stride & 1) == 0 &&
      (dst_stride & 1) == 0) {
    // 2-byte-aligned rows: uchar2 path; cols is in uchar2 units.
    cols = divideUp(columns, 2, 1);
    addWeightedKernel0<uchar, uchar2><<<grid, block, 0, stream>>>(src0, rows,
        cols, src0_stride, alpha, src1, src1_stride, beta, gamma, dst,
        dst_stride);
  }
  else {
    addWeightedKernel1<uchar><<<grid, block, 0, stream>>>(src0, rows, columns,
        src0_stride, alpha, src1, src1_stride, beta, gamma, dst, dst_stride);
  }
  cudaError_t code = cudaGetLastError();
  if (code != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Host launcher for float weighted sum: dst = alpha*src0 + beta*src1 + gamma.
// Strides are in bytes.  Uses the float2-vectorized kernel when all strides
// are 8-byte aligned, otherwise the scalar fallback.
RetCode addWeighted(const float* src0, int rows, int cols, int channels,
                    int src0_stride, float alpha, const float* src1,
                    int src1_stride, float beta, float gamma, float* dst,
                    int dst_stride, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
  // Total float elements per row; both kernels process 2 per thread.
  int columns = cols * channels;
  dim3 block, grid;
  block.x = kBlockDimX0;
  block.y = kBlockDimY0;
  grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
  grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
  if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
      (dst_stride & 7) == 0) {
    // 8-byte-aligned rows: float2 path; cols is in float2 units.
    cols = divideUp(columns, 2, 1);
    addWeightedKernel0<float, float2><<<grid, block, 0, stream>>>(src0, rows,
        cols, src0_stride, alpha, src1, src1_stride, beta, gamma, dst,
        dst_stride);
  }
  else {
    addWeightedKernel1<float><<<grid, block, 0, stream>>>(src0, rows, columns,
        src0_stride, alpha, src1, src1_stride, beta, gamma, dst, dst_stride);
  }
  cudaError_t code = cudaGetLastError();
  if (code != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// AddWeighted<uchar, 1>: weighted sum of single-channel uchar images.
// Thin adapter over the shared uchar addWeighted(); uchar strides are
// already in bytes, so they are forwarded unchanged.
template <>
RetCode AddWeighted<uchar, 1>(cudaStream_t stream,
                              int height,
                              int width,
                              int inWidthStride0,
                              const uchar* inData0,
                              float alpha,
                              int inWidthStride1,
                              const uchar* inData1,
                              float beta,
                              float gamma,
                              int outWidthStride,
                              uchar* outData) {
  return addWeighted(inData0, height, width, 1, inWidthStride0, alpha,
                     inData1, inWidthStride1, beta, gamma, outData,
                     outWidthStride, stream);
}
// AddWeighted<uchar, 3>: weighted sum of 3-channel uchar images.
// Thin adapter over the shared uchar addWeighted(); uchar strides are
// already in bytes, so they are forwarded unchanged.
template <>
RetCode AddWeighted<uchar, 3>(cudaStream_t stream,
                              int height,
                              int width,
                              int inWidthStride0,
                              const uchar* inData0,
                              float alpha,
                              int inWidthStride1,
                              const uchar* inData1,
                              float beta,
                              float gamma,
                              int outWidthStride,
                              uchar* outData) {
  return addWeighted(inData0, height, width, 3, inWidthStride0, alpha,
                     inData1, inWidthStride1, beta, gamma, outData,
                     outWidthStride, stream);
}
// AddWeighted<uchar, 4>: weighted sum of 4-channel uchar images.
// Thin adapter over the shared uchar addWeighted(); uchar strides are
// already in bytes, so they are forwarded unchanged.
template <>
RetCode AddWeighted<uchar, 4>(cudaStream_t stream,
                              int height,
                              int width,
                              int inWidthStride0,
                              const uchar* inData0,
                              float alpha,
                              int inWidthStride1,
                              const uchar* inData1,
                              float beta,
                              float gamma,
                              int outWidthStride,
                              uchar* outData) {
  return addWeighted(inData0, height, width, 4, inWidthStride0, alpha,
                     inData1, inWidthStride1, beta, gamma, outData,
                     outWidthStride, stream);
}
// AddWeighted<float, 1>: weighted sum of single-channel float images.
// Width strides arrive in float elements; addWeighted() expects byte
// strides, so they are scaled by sizeof(float) here.
template <>
RetCode AddWeighted<float, 1>(cudaStream_t stream,
                              int height,
                              int width,
                              int inWidthStride0,
                              const float* inData0,
                              float alpha,
                              int inWidthStride1,
                              const float* inData1,
                              float beta,
                              float gamma,
                              int outWidthStride,
                              float* outData) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return addWeighted(inData0, height, width, 1, stride0, alpha, inData1,
                     stride1, beta, gamma, outData, stride_out, stream);
}
// AddWeighted<float, 3>: weighted sum of 3-channel float images.
// Width strides arrive in float elements; addWeighted() expects byte
// strides, so they are scaled by sizeof(float) here.
template <>
RetCode AddWeighted<float, 3>(cudaStream_t stream,
                              int height,
                              int width,
                              int inWidthStride0,
                              const float* inData0,
                              float alpha,
                              int inWidthStride1,
                              const float* inData1,
                              float beta,
                              float gamma,
                              int outWidthStride,
                              float* outData) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return addWeighted(inData0, height, width, 3, stride0, alpha, inData1,
                     stride1, beta, gamma, outData, stride_out, stream);
}
// AddWeighted<float, 4>: weighted sum of 4-channel float images.
// Width strides arrive in float elements; addWeighted() expects byte
// strides, so they are scaled by sizeof(float) here.
template <>
RetCode AddWeighted<float, 4>(cudaStream_t stream,
                              int height,
                              int width,
                              int inWidthStride0,
                              const float* inData0,
                              float alpha,
                              int inWidthStride1,
                              const float* inData1,
                              float beta,
                              float gamma,
                              int outWidthStride,
                              float* outData) {
  const int stride0 = inWidthStride0 * sizeof(float);
  const int stride1 = inWidthStride1 * sizeof(float);
  const int stride_out = outWidthStride * sizeof(float);
  return addWeighted(inData0, height, width, 4, stride0, alpha, inData1,
                     stride1, beta, gamma, outData, stride_out, stream);
}
/**************************** subtract operation *****************************/
// Scalar subtraction, vectorized: dst = src - scalar, one 4-element vector
// (T1, e.g. uchar4/float4) per thread.  T0 is the scalar element type.
// `cols` is in T1 units; strides in bytes.  For interleaved 3-channel data,
// the scalar assigned to each of the 4 lanes follows the channel phase of
// the vector's first element ((element_x << 2) % 3).
// Block shift constants differ by element size (X0/Y0 for 1-byte, X1/Y1
// otherwise), matching the launch configuration on the host side.
template <typename T0, typename T1>
__global__
void subtractKernel0(const T0* src, int rows, int cols, int channels,
                     int src_stride, T0 scalar0, T0 scalar1, T0 scalar2,
                     T0 scalar3, T0* dst, int dst_stride) {
  int element_x, element_y;
  if (sizeof(T0) == 1) {
    element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
    element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  }
  else {
    element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
    element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
  }
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T1* input = (T1*)((uchar*)src + element_y * src_stride);
  T1* output = (T1*)((uchar*)dst + element_y * dst_stride);
  T1 input_value = input[element_x];
  T0 value0, value1, value2, value3;
  T1 output_value;
  if (channels == 1) {
    // Single channel: every lane subtracts scalar0.
    value0 = scalar0;
    value1 = scalar0;
    value2 = scalar0;
    value3 = scalar0;
  }
  else if (channels == 3) {
    // Interleaved RGB: pick the scalar rotation matching this vector's
    // starting channel.
    int value = (element_x << 2) % 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
      value2 = scalar2;
      value3 = scalar0;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
      value2 = scalar0;
      value3 = scalar1;
    }
    else {
      value0 = scalar2;
      value1 = scalar0;
      value2 = scalar1;
      value3 = scalar2;
    }
  }
  else { // channel === 4
    value0 = scalar0;
    value1 = scalar1;
    value2 = scalar2;
    value3 = scalar3;
  }
  if (sizeof(T0) == 1) {
    // uchar data: clamp each difference to [0, 255].
    output_value.x = saturate_cast(input_value.x - value0);
    output_value.y = saturate_cast(input_value.y - value1);
    output_value.z = saturate_cast(input_value.z - value2);
    output_value.w = saturate_cast(input_value.w - value3);
  }
  else {
    output_value.x = input_value.x - value0;
    output_value.y = input_value.y - value1;
    output_value.z = input_value.z - value2;
    output_value.w = input_value.w - value3;
  }
  output[element_x] = output_value;
}
// Scalar subtraction for fully contiguous data treated as one flat array of
// `cols` elements; one 4-element vector (T1) per thread, fixed 256-thread
// 1-D blocks.  The channel-phase scalar selection matches subtractKernel0.
// NOTE(review): the tail iteration performs a full T1 vector load, reading
// up to 3 elements past `cols` when cols is not a multiple of 4 —
// presumably within the allocation; confirm against callers' buffer sizes.
template <typename T0, typename T1>
__global__
void subtractKernel10(const T0* src, int cols, int channels, T0 scalar0,
                      T0 scalar1, T0 scalar2, T0 scalar3, T0* dst) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;
  int index_x = element_x << 2;  // index of the first of this thread's 4 elements
  if (index_x >= cols) {
    return;
  }
  const T1* input = (T1*)((uchar*)src);
  T1 input_value = input[element_x];
  T0 value0, value1, value2, value3;
  T1 output_value;
  if (channels == 1) {
    // Single channel: every lane subtracts scalar0.
    value0 = scalar0;
    value1 = scalar0;
    value2 = scalar0;
    value3 = scalar0;
  }
  else if (channels == 3) {
    // Interleaved RGB: pick the scalar rotation matching this vector's
    // starting channel.
    int value = (element_x << 2) % 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
      value2 = scalar2;
      value3 = scalar0;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
      value2 = scalar0;
      value3 = scalar1;
    }
    else {
      value0 = scalar2;
      value1 = scalar0;
      value2 = scalar1;
      value3 = scalar2;
    }
  }
  else { // channel === 4
    value0 = scalar0;
    value1 = scalar1;
    value2 = scalar2;
    value3 = scalar3;
  }
  if (index_x < cols - 4) {
    // Fast path: all 4 lanes in range; one vector store.
    if (sizeof(T0) == 1) {
      output_value.x = saturate_cast(input_value.x - value0);
      output_value.y = saturate_cast(input_value.y - value1);
      output_value.z = saturate_cast(input_value.z - value2);
      output_value.w = saturate_cast(input_value.w - value3);
    }
    else {
      output_value.x = input_value.x - value0;
      output_value.y = input_value.y - value1;
      output_value.z = input_value.z - value2;
      output_value.w = input_value.w - value3;
    }
    T1* output = (T1*)((uchar*)dst);
    output[element_x] = output_value;
  }
  else {
    // Tail: compute and store only the lanes inside `cols`, element-wise.
    if (sizeof(T0) == 1) {
      output_value.x = saturate_cast(input_value.x - value0);
      if (index_x < cols - 1) {
        output_value.y = saturate_cast(input_value.y - value1);
      }
      if (index_x < cols - 2) {
        output_value.z = saturate_cast(input_value.z - value2);
      }
      if (index_x < cols - 3) {
        output_value.w = saturate_cast(input_value.w - value3);
      }
    }
    else {
      output_value.x = input_value.x - value0;
      if (index_x < cols - 1) {
        output_value.y = input_value.y - value1;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value.z - value2;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value.w - value3;
      }
    }
    dst[index_x] = output_value.x;
    if (index_x < cols - 1) {
      dst[index_x + 1] = output_value.y;
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = output_value.z;
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = output_value.w;
    }
  }
}
// Scalar subtraction, scalar fallback: dst = src - scalar, 2 elements per
// thread.  `cols` is the total element count per row; strides in bytes.
// The scalar applied to each element follows its channel phase (element_x
// is always even since it is left-shifted by 1).  The last block in x
// guards the second element against an odd row length.
// Block shift constants differ by element size (X0/Y0 for 1-byte, X1/Y1
// otherwise), matching the launch configuration on the host side.
template <typename T>
__global__
void subtractKernel11(const T* src, int rows, int cols, int channels,
                      int src_stride, T scalar0, T scalar1, T scalar2,
                      T scalar3, T* dst, int dst_stride) {
  int element_x, element_y;
  if (sizeof(T) == 1) {
    element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
    element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  }
  else {
    element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 1;
    element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
  }
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const T* input = (T*)((uchar*)src + element_y * src_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value0, input_value1;
  T value0, value1;
  T output_value0, output_value1;
  if (channels == 1) {
    value0 = scalar0;
    value1 = scalar0;
  }
  else if (channels == 3) {
    // Interleaved RGB: scalar pair depends on this element's channel.
    int value = element_x % 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
    }
    else {
      value0 = scalar2;
      value1 = scalar0;
    }
  }
  else { // channel === 4
    // 4 channels: element_x & 3 gives the channel of the first element.
    int value = element_x & 3;
    if (value == 0) {
      value0 = scalar0;
      value1 = scalar1;
    }
    else if (value == 1) {
      value0 = scalar1;
      value1 = scalar2;
    }
    else if (value == 2) {
      value0 = scalar2;
      value1 = scalar3;
    }
    else {
      value0 = scalar3;
      value1 = scalar0;
    }
  }
  if (blockIdx.x < gridDim.x - 1) {
    // Interior block: both elements guaranteed in range.
    input_value0 = input[element_x];
    input_value1 = input[element_x + 1];
    if (sizeof(T) == 1) {
      output_value0 = saturate_cast(input_value0 - value0);
      output_value1 = saturate_cast(input_value1 - value1);
    }
    else {
      output_value0 = input_value0 - value0;
      output_value1 = input_value1 - value1;
    }
    output[element_x] = output_value0;
    output[element_x + 1] = output_value1;
  }
  else {
    // Last block: the second element may fall past the row end.
    input_value0 = input[element_x];
    if (element_x != cols - 1) {
      input_value1 = input[element_x + 1];
    }
    if (element_x != cols - 1) {
      if (sizeof(T) == 1) {
        output_value0 = saturate_cast(input_value0 - value0);
        output_value1 = saturate_cast(input_value1 - value1);
      }
      else {
        output_value0 = input_value0 - value0;
        output_value1 = input_value1 - value1;
      }
    }
    else {
      if (sizeof(T) == 1) {
        output_value0 = saturate_cast(input_value0 - value0);
      }
      else {
        output_value0 = input_value0 - value0;
      }
    }
    output[element_x] = output_value0;
    if (element_x != cols - 1) {
      output[element_x + 1] = output_value1;
    }
  }
}
// Dispatcher for uchar scalar subtraction: chooses the vectorized kernel
// (4-byte aligned strides), the fully contiguous 1D kernel, or the generic
// two-elements-per-thread fallback. Returns RC_SUCCESS or a runtime error.
RetCode subtract(const uchar* src, int rows, int cols, int channels,
                 int src_stride, const uchar* scalar, uchar* dst,
                 int dst_stride, cudaStream_t stream) {
  PPL_ASSERT(src != nullptr);
  PPL_ASSERT(scalar != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  // Unpack the per-channel scalar; unused trailing slots stay zero.
  uchar value0 = scalar[0];
  uchar value1 = (channels >= 3) ? scalar[1] : 0;
  uchar value2 = (channels >= 3) ? scalar[2] : 0;
  uchar value3 = (channels == 4) ? scalar[3] : 0;

  if ((src_stride & 3) == 0 && (dst_stride & 3) == 0) {
    // 4-byte aligned rows: uchar4 loads/stores.
    cols = divideUp(columns, 4, 2);
    subtractKernel0<uchar, uchar4><<<grid, block, 0, stream>>>(src, rows, cols,
        channels, src_stride, value0, value1, value2, value3, dst, dst_stride);
  }
  else if (src_stride == columns && dst_stride == columns) {
    // Fully contiguous: flatten the image and launch a 1D grid.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    subtractKernel10<uchar, uchar4><<<grid, block, 0, stream>>>(src, columns,
        channels, value0, value1, value2, value3, dst);
  }
  else {
    // Generic fallback: two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
    subtractKernel11<uchar><<<grid, block, 0, stream>>>(src, rows, columns,
        channels, value0, value1, value2, value3, dst, dst_stride);
  }

  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(status);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Dispatcher for float scalar subtraction: chooses the float4-vectorized
// kernel (16-byte aligned strides), the fully contiguous 1D kernel, or the
// generic two-elements-per-thread fallback. Strides are in bytes.
RetCode subtract(const float* src, int rows, int cols, int channels,
                 int src_stride, const float* scalar, float* dst,
                 int dst_stride, cudaStream_t stream) {
  PPL_ASSERT(src != nullptr);
  PPL_ASSERT(scalar != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));

  int columns = cols * channels;
  dim3 block(kBlockDimX1, kBlockDimY1);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX1, kBlockShiftX1),
            divideUp(rows, kBlockDimY1, kBlockShiftY1));

  // Unpack the per-channel scalar; unused trailing slots stay zero.
  float value0 = scalar[0];
  float value1 = (channels >= 3) ? scalar[1] : 0.f;
  float value2 = (channels >= 3) ? scalar[2] : 0.f;
  float value3 = (channels == 4) ? scalar[3] : 0.f;

  if ((src_stride & 15) == 0 && (dst_stride & 15) == 0) {
    // 16-byte aligned rows: float4 loads/stores.
    cols = divideUp(columns, 4, 2);
    subtractKernel0<float, float4><<<grid, block, 0, stream>>>(src, rows, cols,
        channels, src_stride, value0, value1, value2, value3, dst, dst_stride);
  }
  else if (src_stride == columns * (int)sizeof(float) &&
           dst_stride == columns * (int)sizeof(float)) {
    // Fully contiguous: flatten the image and launch a 1D grid.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    subtractKernel10<float, float4><<<grid, block, 0, stream>>>(src, columns,
        channels, value0, value1, value2, value3, dst);
  }
  else {
    // Generic fallback: two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX1, kBlockShiftX1);
    subtractKernel11<float><<<grid, block, 0, stream>>>(src, rows, columns,
        channels, value0, value1, value2, value3, dst, dst_stride);
  }

  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(status);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Public entry point: single-channel uchar scalar subtraction.
template <>
RetCode Subtract<uchar, 1>(cudaStream_t stream,
                           int height,
                           int width,
                           int inWidthStride,
                           const uchar* inData,
                           const uchar* scalar,
                           int outWidthStride,
                           uchar* outData) {
  return subtract(inData, height, width, 1, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
// Public entry point: three-channel uchar scalar subtraction.
template <>
RetCode Subtract<uchar, 3>(cudaStream_t stream,
                           int height,
                           int width,
                           int inWidthStride,
                           const uchar* inData,
                           const uchar* scalar,
                           int outWidthStride,
                           uchar* outData) {
  return subtract(inData, height, width, 3, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
// Public entry point: four-channel uchar scalar subtraction.
template <>
RetCode Subtract<uchar, 4>(cudaStream_t stream,
                           int height,
                           int width,
                           int inWidthStride,
                           const uchar* inData,
                           const uchar* scalar,
                           int outWidthStride,
                           uchar* outData) {
  return subtract(inData, height, width, 4, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
// Public entry point: single-channel float scalar subtraction.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Subtract<float, 1>(cudaStream_t stream,
                           int height,
                           int width,
                           int inWidthStride,
                           const float* inData,
                           const float* scalar,
                           int outWidthStride,
                           float* outData) {
  inWidthStride *= sizeof(float);
  outWidthStride *= sizeof(float);
  return subtract(inData, height, width, 1, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
// Public entry point: three-channel float scalar subtraction.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Subtract<float, 3>(cudaStream_t stream,
                           int height,
                           int width,
                           int inWidthStride,
                           const float* inData,
                           const float* scalar,
                           int outWidthStride,
                           float* outData) {
  inWidthStride *= sizeof(float);
  outWidthStride *= sizeof(float);
  return subtract(inData, height, width, 3, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
// Public entry point: four-channel float scalar subtraction.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Subtract<float, 4>(cudaStream_t stream,
                           int height,
                           int width,
                           int inWidthStride,
                           const float* inData,
                           const float* scalar,
                           int outWidthStride,
                           float* outData) {
  inWidthStride *= sizeof(float);
  outWidthStride *= sizeof(float);
  return subtract(inData, height, width, 4, inWidthStride, scalar, outData,
                  outWidthStride, stream);
}
/**************************** multiply operation *****************************/
// Element-wise product of two uchar images; vectorized path for rows whose
// strides are 4-byte aligned. Each thread processes one uchar4; the product
// is accumulated in float and saturated back to uchar on store.
__global__
void multiplyKernel0(const uchar* src0, int rows, int cols, int src0_stride,
                     const uchar* src1, int src1_stride, uchar* dst,
                     int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const uchar4* row0 = (uchar4*)(src0 + element_y * src0_stride);
  const uchar4* row1 = (uchar4*)(src1 + element_y * src1_stride);
  uchar4 value0 = row0[element_x];
  uchar4 value1 = row1[element_x];
  float4 result;
  if (scale == 1.f) {
    // Skip the scale multiply when it is a no-op.
    result.x = value0.x * value1.x;
    result.y = value0.y * value1.y;
    result.z = value0.z * value1.z;
    result.w = value0.w * value1.w;
  }
  else {
    result.x = value0.x * value1.x * scale;
    result.y = value0.y * value1.y * scale;
    result.z = value0.z * value1.z * scale;
    result.w = value0.w * value1.w * scale;
  }
  uchar4* out_row = (uchar4*)(dst + element_y * dst_stride);
  out_row[element_x] = saturate_cast_vector<uchar4, float4>(result);
}
// Element-wise product of two fully contiguous uchar images, flattened into
// one row of 'cols' bytes. Each thread handles four consecutive bytes via a
// single uchar4 load per input; results saturate back to uchar.
// Launched 1D with 256-thread blocks (the << 8 below matches that).
__global__
void multiplyKernel10(const uchar* src0, int cols, const uchar* src1,
                      uchar* dst, float scale) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;  // uchar4 index
  int index_x = element_x << 2;                     // byte index
  if (index_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)src0;
  const uchar4* input1 = (uchar4*)src1;
  // NOTE(review): these vector loads can read up to 3 bytes past 'cols' when
  // the thread lands on a ragged tail — confirm the buffers are padded.
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  float4 output_value;
  if (index_x < cols - 4) {
    // Interior: all four lanes valid; one vectorized store.
    if (scale == 1.f) {
      output_value.x = input_value0.x * input_value1.x;
      output_value.y = input_value0.y * input_value1.y;
      output_value.z = input_value0.z * input_value1.z;
      output_value.w = input_value0.w * input_value1.w;
    }
    else {
      output_value.x = input_value0.x * input_value1.x * scale;
      output_value.y = input_value0.y * input_value1.y * scale;
      output_value.z = input_value0.z * input_value1.z * scale;
      output_value.w = input_value0.w * input_value1.w * scale;
    }
    uchar4* output = (uchar4*)dst;
    output[element_x] = saturate_cast_vector<uchar4, float4>(output_value);
  }
  else {
    // Tail: compute and store lane by lane, guarding each against 'cols'.
    if (scale == 1.f) {
      output_value.x = input_value0.x * input_value1.x;
      if (index_x < cols - 1) {
        output_value.y = input_value0.y * input_value1.y;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value0.z * input_value1.z;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value0.w * input_value1.w;
      }
    }
    else {
      output_value.x = input_value0.x * input_value1.x * scale;
      if (index_x < cols - 1) {
        output_value.y = input_value0.y * input_value1.y * scale;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value0.z * input_value1.z * scale;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value0.w * input_value1.w * scale;
      }
    }
    dst[index_x] = saturate_cast(output_value.x);
    if (index_x < cols - 1) {
      dst[index_x + 1] = saturate_cast(output_value.y);
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = saturate_cast(output_value.z);
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = saturate_cast(output_value.w);
    }
  }
}
// Generic element-wise multiply for rows whose stride is not vector-aligned.
// Each thread processes two consecutive elements; the product is formed in
// float and, for T == uchar, saturated on store. 'cols' is the total element
// count per row (cols * channels at the call site).
template <typename T>
__global__
void multiplyKernel11(const T* src0, int rows, int cols, int src0_stride,
                      const T* src1, int src1_stride, T* dst, int dst_stride,
                      float scale) {
  int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  // Strides are in bytes, so row addressing goes through uchar*.
  const T* input0 = (T*)((uchar*)src0 + element_y * src0_stride);
  const T* input1 = (T*)((uchar*)src1 + element_y * src1_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value00, input_value01;
  T input_value10, input_value11;
  float output_value0, output_value1;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior blocks: both elements are guaranteed in range.
    input_value00 = input0[element_x];
    input_value01 = input0[element_x + 1];
    input_value10 = input1[element_x];
    input_value11 = input1[element_x + 1];
    if (scale == 1.f) {
      output_value0 = input_value00 * input_value10;
      output_value1 = input_value01 * input_value11;
    }
    else {
      output_value0 = input_value00 * input_value10 * scale;
      output_value1 = input_value01 * input_value11 * scale;
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      output[element_x + 1] = saturate_cast(output_value1);
    }
    else {
      output[element_x] = output_value0;
      output[element_x + 1] = output_value1;
    }
  }
  else {
    // Last block: element_x + 1 may be past the row end, so guard it.
    input_value00 = input0[element_x];
    if (element_x != cols - 1) {
      input_value01 = input0[element_x + 1];
    }
    input_value10 = input1[element_x];
    if (element_x != cols - 1) {
      input_value11 = input1[element_x + 1];
    }
    if (scale == 1.f) {
      output_value0 = input_value00 * input_value10;
      if (element_x != cols - 1) {
        output_value1 = input_value01 * input_value11;
      }
    }
    else {
      output_value0 = input_value00 * input_value10 * scale;
      if (element_x != cols - 1) {
        output_value1 = input_value01 * input_value11 * scale;
      }
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      if (element_x != cols - 1) {
        output[element_x + 1] = saturate_cast(output_value1);
      }
    }
    else {
      output[element_x] = output_value0;
      if (element_x != cols - 1) {
        output[element_x + 1] = output_value1;
      }
    }
  }
}
// Element-wise product of two float images; vectorized path for rows whose
// strides are 8-byte aligned. Each thread processes one float2.
__global__
void multiplyKernel0(const float* src0, int rows, int cols, int src0_stride,
                     const float* src1, int src1_stride, float* dst,
                     int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  // Strides are in bytes, so row addressing goes through uchar*.
  const float2* row0 = (float2*)((uchar*)src0 + element_y * src0_stride);
  const float2* row1 = (float2*)((uchar*)src1 + element_y * src1_stride);
  float2 value0 = row0[element_x];
  float2 value1 = row1[element_x];
  float2 result;
  if (scale == 1.f) {
    // Skip the scale multiply when it is a no-op.
    result.x = value0.x * value1.x;
    result.y = value0.y * value1.y;
  }
  else {
    result.x = value0.x * value1.x * scale;
    result.y = value0.y * value1.y * scale;
  }
  float2* out_row = (float2*)((uchar*)dst + element_y * dst_stride);
  out_row[element_x] = result;
}
// Dispatcher for uchar element-wise multiply: chooses the vectorized kernel
// (4-byte aligned strides), the fully contiguous 1D kernel, or the generic
// two-elements-per-thread fallback.
RetCode multiply(const uchar* src0, int rows, int cols, int channels,
                 int src0_stride, const uchar* src1, int src1_stride,
                 uchar* dst, int dst_stride, float scale, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  bool aligned = ((src0_stride & 3) == 0) && ((src1_stride & 3) == 0) &&
                 ((dst_stride & 3) == 0);
  bool contiguous = (src0_stride == columns) && (src1_stride == columns) &&
                    (dst_stride == columns);
  if (aligned) {
    // 4-byte aligned rows: uchar4 loads/stores.
    cols = divideUp(columns, 4, 2);
    multiplyKernel0<<<grid, block, 0, stream>>>(src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else if (contiguous) {
    // Fully contiguous: flatten the image and launch a 1D grid.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    multiplyKernel10<<<grid, block, 0, stream>>>(src0, columns, src1, dst,
                                                 scale);
  }
  else {
    // Generic fallback: two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
    multiplyKernel11<uchar><<<grid, block, 0, stream>>>(src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(status);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Dispatcher for float element-wise multiply: float2-vectorized kernel when
// all strides are 8-byte aligned, otherwise the generic fallback.
RetCode multiply(const float* src0, int rows, int cols, int channels,
                 int src0_stride, const float* src1, int src1_stride,
                 float* dst, int dst_stride, float scale, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
      (dst_stride & 7) == 0) {
    // 8-byte aligned rows: float2 loads/stores.
    cols = divideUp(columns, 2, 1);
    multiplyKernel0<<<grid, block, 0, stream>>>(src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else {
    multiplyKernel11<float><<<grid, block, 0, stream>>>(src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(status);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Public entry point: single-channel uchar multiply.
template <>
RetCode Mul<uchar, 1>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return multiply(inData0, height, width, 1, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: three-channel uchar multiply.
template <>
RetCode Mul<uchar, 3>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return multiply(inData0, height, width, 3, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: four-channel uchar multiply.
template <>
RetCode Mul<uchar, 4>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return multiply(inData0, height, width, 4, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: single-channel float multiply.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Mul<float, 1>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  inWidthStride0 *= sizeof(float);
  inWidthStride1 *= sizeof(float);
  outWidthStride *= sizeof(float);
  return multiply(inData0, height, width, 1, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: three-channel float multiply.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Mul<float, 3>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  inWidthStride0 *= sizeof(float);
  inWidthStride1 *= sizeof(float);
  outWidthStride *= sizeof(float);
  return multiply(inData0, height, width, 3, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: four-channel float multiply.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Mul<float, 4>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  inWidthStride0 *= sizeof(float);
  inWidthStride1 *= sizeof(float);
  outWidthStride *= sizeof(float);
  return multiply(inData0, height, width, 4, inWidthStride0, inData1,
                  inWidthStride1, outData, outWidthStride, scale, stream);
}
/***************************** divide operation ******************************/
// Element-wise division of two uchar images; vectorized path for rows whose
// strides are 4-byte aligned. Division by zero yields 0; results saturate
// back to uchar.
__global__
void divideKernel0(const uchar* src0, int rows, int cols, int src0_stride,
                   const uchar* src1, int src1_stride, uchar* dst,
                   int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  const uchar4* row0 = (uchar4*)(src0 + element_y * src0_stride);
  const uchar4* row1 = (uchar4*)(src1 + element_y * src1_stride);
  uchar4 dividend = row0[element_x];
  uchar4 divisor = row1[element_x];
  float4 quotient;
  if (scale == 1.f) {
    // NOTE(review): with scale == 1 both operands are uchar, so this divides
    // as integers before the float conversion, unlike the scaled branch
    // which divides in float — presumably intentional; confirm.
    quotient.x = divisor.x == 0 ? 0 : dividend.x / divisor.x;
    quotient.y = divisor.y == 0 ? 0 : dividend.y / divisor.y;
    quotient.z = divisor.z == 0 ? 0 : dividend.z / divisor.z;
    quotient.w = divisor.w == 0 ? 0 : dividend.w / divisor.w;
  }
  else {
    quotient.x = divisor.x == 0 ? 0 : scale * dividend.x / divisor.x;
    quotient.y = divisor.y == 0 ? 0 : scale * dividend.y / divisor.y;
    quotient.z = divisor.z == 0 ? 0 : scale * dividend.z / divisor.z;
    quotient.w = divisor.w == 0 ? 0 : scale * dividend.w / divisor.w;
  }
  uchar4* out_row = (uchar4*)(dst + element_y * dst_stride);
  out_row[element_x] = saturate_cast_vector<uchar4, float4>(quotient);
}
// Element-wise division of two fully contiguous uchar images, flattened into
// one row of 'cols' bytes. Each thread handles four consecutive bytes via a
// single uchar4 load per input; division by zero yields 0 and results
// saturate back to uchar. Launched 1D with 256-thread blocks (<< 8 below).
__global__
void divideKernel10(const uchar* src0, int cols, const uchar* src1, uchar* dst,
                    float scale) {
  int element_x = (blockIdx.x << 8) + threadIdx.x;  // uchar4 index
  int index_x = element_x << 2;                     // byte index
  if (index_x >= cols) {
    return;
  }
  const uchar4* input0 = (uchar4*)src0;
  const uchar4* input1 = (uchar4*)src1;
  // NOTE(review): these vector loads can read up to 3 bytes past 'cols' when
  // the thread lands on a ragged tail — confirm the buffers are padded.
  uchar4 input_value0 = input0[element_x];
  uchar4 input_value1 = input1[element_x];
  float4 output_value;
  if (index_x < cols - 4) {
    // Interior: all four lanes valid; one vectorized store.
    if (scale == 1.f) {
      output_value.x = input_value1.x == 0 ? 0 :
                       input_value0.x / input_value1.x;
      output_value.y = input_value1.y == 0 ? 0 :
                       input_value0.y / input_value1.y;
      output_value.z = input_value1.z == 0 ? 0 :
                       input_value0.z / input_value1.z;
      output_value.w = input_value1.w == 0 ? 0 :
                       input_value0.w / input_value1.w;
    }
    else {
      output_value.x = input_value1.x == 0 ? 0 :
                       scale * input_value0.x / input_value1.x;
      output_value.y = input_value1.y == 0 ? 0 :
                       scale * input_value0.y / input_value1.y;
      output_value.z = input_value1.z == 0 ? 0 :
                       scale * input_value0.z / input_value1.z;
      output_value.w = input_value1.w == 0 ? 0 :
                       scale * input_value0.w / input_value1.w;
    }
    uchar4* output = (uchar4*)dst;
    output[element_x] = saturate_cast_vector<uchar4, float4>(output_value);
  }
  else {
    // Tail: compute and store lane by lane, guarding each against 'cols'.
    if (scale == 1.f) {
      output_value.x = input_value1.x == 0 ? 0 :
                       input_value0.x / input_value1.x;
      if (index_x < cols - 1) {
        output_value.y = input_value1.y == 0 ? 0 :
                         input_value0.y / input_value1.y;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value1.z == 0 ? 0 :
                         input_value0.z / input_value1.z;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value1.w == 0 ? 0 :
                         input_value0.w / input_value1.w;
      }
    }
    else {
      output_value.x = input_value1.x == 0 ? 0 :
                       scale * input_value0.x / input_value1.x;
      if (index_x < cols - 1) {
        output_value.y = input_value1.y == 0 ? 0 :
                         scale * input_value0.y / input_value1.y;
      }
      if (index_x < cols - 2) {
        output_value.z = input_value1.z == 0 ? 0 :
                         scale * input_value0.z / input_value1.z;
      }
      if (index_x < cols - 3) {
        output_value.w = input_value1.w == 0 ? 0 :
                         scale * input_value0.w / input_value1.w;
      }
    }
    dst[index_x] = saturate_cast(output_value.x);
    if (index_x < cols - 1) {
      dst[index_x + 1] = saturate_cast(output_value.y);
    }
    if (index_x < cols - 2) {
      dst[index_x + 2] = saturate_cast(output_value.z);
    }
    if (index_x < cols - 3) {
      dst[index_x + 3] = saturate_cast(output_value.w);
    }
  }
}
// Generic element-wise division for rows whose stride is not vector-aligned.
// Each thread processes two consecutive elements; division by zero yields 0,
// and for T == uchar the result saturates on store. 'cols' is the total
// element count per row (cols * channels at the call site).
// NOTE(review): when T is uchar and scale == 1, the division below is
// integer division (uchar / uchar) before the float conversion, while the
// scaled branch divides in float — presumably intentional; confirm.
template <typename T>
__global__
void divideKernel11(const T* src0, int rows, int cols, int src0_stride,
                    const T* src1, int src1_stride, T* dst, int dst_stride,
                    float scale) {
  int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  // Strides are in bytes, so row addressing goes through uchar*.
  const T* input0 = (T*)((uchar*)src0 + element_y * src0_stride);
  const T* input1 = (T*)((uchar*)src1 + element_y * src1_stride);
  T* output = (T*)((uchar*)dst + element_y * dst_stride);
  T input_value00, input_value01;
  T input_value10, input_value11;
  float output_value0, output_value1;
  if (blockIdx.x < gridDim.x - 1) {
    // Interior blocks: both elements are guaranteed in range.
    input_value00 = input0[element_x];
    input_value01 = input0[element_x + 1];
    input_value10 = input1[element_x];
    input_value11 = input1[element_x + 1];
    if (scale == 1.f) {
      output_value0 = input_value10 == 0 ? 0 : input_value00 / input_value10;
      output_value1 = input_value11 == 0 ? 0 : input_value01 / input_value11;
    }
    else {
      output_value0 = input_value10 == 0 ? 0 :
                      scale * input_value00 / input_value10;
      output_value1 = input_value11 == 0 ? 0 :
                      scale * input_value01 / input_value11;
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      output[element_x + 1] = saturate_cast(output_value1);
    }
    else {
      output[element_x] = output_value0;
      output[element_x + 1] = output_value1;
    }
  }
  else {
    // Last block: element_x + 1 may be past the row end, so guard it.
    input_value00 = input0[element_x];
    if (element_x != cols - 1) {
      input_value01 = input0[element_x + 1];
    }
    input_value10 = input1[element_x];
    if (element_x != cols - 1) {
      input_value11 = input1[element_x + 1];
    }
    if (scale == 1.f) {
      output_value0 = input_value10 == 0 ? 0 : input_value00 / input_value10;
      if (element_x != cols - 1) {
        output_value1 = input_value11 == 0 ? 0 : input_value01 / input_value11;
      }
    }
    else {
      output_value0 = input_value10 == 0 ? 0 :
                      scale * input_value00 / input_value10;
      if (element_x != cols - 1) {
        output_value1 = input_value11 == 0 ? 0 :
                        scale * input_value01 / input_value11;
      }
    }
    if (sizeof(T) == 1) {
      output[element_x] = saturate_cast(output_value0);
      if (element_x != cols - 1) {
        output[element_x + 1] = saturate_cast(output_value1);
      }
    }
    else {
      output[element_x] = output_value0;
      if (element_x != cols - 1) {
        output[element_x + 1] = output_value1;
      }
    }
  }
}
// Element-wise division of two float images; vectorized path for rows whose
// strides are 8-byte aligned. Division by zero yields 0.
__global__
void divideKernel0(const float* src0, int rows, int cols, int src0_stride,
                   const float* src1, int src1_stride, float* dst,
                   int dst_stride, float scale) {
  int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
  int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
  if (element_y >= rows || element_x >= cols) {
    return;
  }
  // Strides are in bytes, so row addressing goes through uchar*.
  const float2* row0 = (float2*)((uchar*)src0 + element_y * src0_stride);
  const float2* row1 = (float2*)((uchar*)src1 + element_y * src1_stride);
  float2 dividend = row0[element_x];
  float2 divisor = row1[element_x];
  float2 quotient;
  if (scale == 1.f) {
    quotient.x = divisor.x == 0 ? 0 : dividend.x / divisor.x;
    quotient.y = divisor.y == 0 ? 0 : dividend.y / divisor.y;
  }
  else {
    quotient.x = divisor.x == 0 ? 0 : scale * dividend.x / divisor.x;
    quotient.y = divisor.y == 0 ? 0 : scale * dividend.y / divisor.y;
  }
  float2* out_row = (float2*)((uchar*)dst + element_y * dst_stride);
  out_row[element_x] = quotient;
}
// Dispatcher for uchar element-wise division: chooses the vectorized kernel
// (4-byte aligned strides), the fully contiguous 1D kernel, or the generic
// two-elements-per-thread fallback.
RetCode divide(const uchar* src0, int rows, int cols, int channels,
               int src0_stride, const uchar* src1, int src1_stride,
               uchar* dst, int dst_stride, float scale, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(uchar));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 4, 2), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  bool aligned = ((src0_stride & 3) == 0) && ((src1_stride & 3) == 0) &&
                 ((dst_stride & 3) == 0);
  bool contiguous = (src0_stride == columns) && (src1_stride == columns) &&
                    (dst_stride == columns);
  if (aligned) {
    // 4-byte aligned rows: uchar4 loads/stores.
    cols = divideUp(columns, 4, 2);
    divideKernel0<<<grid, block, 0, stream>>>(src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else if (contiguous) {
    // Fully contiguous: flatten the image and launch a 1D grid.
    columns *= rows;
    cols = divideUp(columns, 4, 2);
    block = dim3(256, 1);
    grid = dim3(divideUp(cols, 256, 8), 1);
    divideKernel10<<<grid, block, 0, stream>>>(src0, columns, src1, dst,
                                               scale);
  }
  else {
    // Generic fallback: two elements per thread.
    grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
    divideKernel11<uchar><<<grid, block, 0, stream>>>(src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(status);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Dispatcher for float element-wise division: float2-vectorized kernel when
// all strides are 8-byte aligned, otherwise the generic fallback.
RetCode divide(const float* src0, int rows, int cols, int channels,
               int src0_stride, const float* src1, int src1_stride,
               float* dst, int dst_stride, float scale, cudaStream_t stream) {
  PPL_ASSERT(src0 != nullptr);
  PPL_ASSERT(src1 != nullptr);
  PPL_ASSERT(dst != nullptr);
  PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
  PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
  PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));

  int columns = cols * channels;
  dim3 block(kBlockDimX0, kBlockDimY0);
  dim3 grid(divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0),
            divideUp(rows, kBlockDimY0, kBlockShiftY0));

  if ((src0_stride & 7) == 0 && (src1_stride & 7) == 0 &&
      (dst_stride & 7) == 0) {
    // 8-byte aligned rows: float2 loads/stores.
    cols = divideUp(columns, 2, 1);
    divideKernel0<<<grid, block, 0, stream>>>(src0, rows, cols, src0_stride,
        src1, src1_stride, dst, dst_stride, scale);
  }
  else {
    divideKernel11<float><<<grid, block, 0, stream>>>(src0, rows, columns,
        src0_stride, src1, src1_stride, dst, dst_stride, scale);
  }

  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    LOG(ERROR) << "CUDA error: " << cudaGetErrorString(status);
    return RC_DEVICE_RUNTIME_ERROR;
  }
  return RC_SUCCESS;
}
// Public entry point: single-channel uchar division.
template <>
RetCode Div<uchar, 1>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return divide(inData0, height, width, 1, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: three-channel uchar division.
template <>
RetCode Div<uchar, 3>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return divide(inData0, height, width, 3, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: four-channel uchar division.
template <>
RetCode Div<uchar, 4>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const uchar* inData0,
                      int inWidthStride1,
                      const uchar* inData1,
                      int outWidthStride,
                      uchar* outData,
                      float scale) {
  return divide(inData0, height, width, 4, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: single-channel float division.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Div<float, 1>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  inWidthStride0 *= sizeof(float);
  inWidthStride1 *= sizeof(float);
  outWidthStride *= sizeof(float);
  return divide(inData0, height, width, 1, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: three-channel float division.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Div<float, 3>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  inWidthStride0 *= sizeof(float);
  inWidthStride1 *= sizeof(float);
  outWidthStride *= sizeof(float);
  return divide(inData0, height, width, 3, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
// Public entry point: four-channel float division.
// Caller strides are in elements; the implementation expects bytes.
template <>
RetCode Div<float, 4>(cudaStream_t stream,
                      int height,
                      int width,
                      int inWidthStride0,
                      const float* inData0,
                      int inWidthStride1,
                      const float* inData1,
                      int outWidthStride,
                      float* outData,
                      float scale) {
  inWidthStride0 *= sizeof(float);
  inWidthStride1 *= sizeof(float);
  outWidthStride *= sizeof(float);
  return divide(inData0, height, width, 4, inWidthStride0, inData1,
                inWidthStride1, outData, outWidthStride, scale, stream);
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
anchorGeneratorTorch.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <stdio.h>
#include <iostream>
#include "anchorGeneratorTorch.h"
// Generates the full anchor grid for one feature map: every feature-map cell
// gets 9 anchors, each obtained by shifting a base anchor by
// (col * stride, row * stride). One thread per anchor; output holds
// [x0, y0, x1, y1] per anchor, in thread-id order. Launched 1D by
// anchorGridTorch().
__global__ void anchorGenerator(AnchorParamsTorch params, float *output)
{
    const int dim = params.featureSize.nHeight * params.featureSize.nWidth * 9;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= dim)
    {
        return;
    }
    // currentIndex: linear index of the feature-map cell this anchor sits on.
    // arId: which of the cell's 9 base anchors this thread produces.
    int currentIndex, arId;
    currentIndex = tid / 9;
    arId = tid - (currentIndex * 9);
    int col = currentIndex % params.featureSize.nWidth;  // cell column
    int row = currentIndex / params.featureSize.nWidth;  // cell row
    // Shift the base anchor's corners by the cell offset in input pixels.
    float x0 = params.fBaseAnchor[arId].x0 + col * params.nStride;
    float y0 = params.fBaseAnchor[arId].y0 + row * params.nStride;
    float x1 = params.fBaseAnchor[arId].x1 + col * params.nStride;
    float y1 = params.fBaseAnchor[arId].y1 + row * params.nStride;
    output[tid * 4] = x0;
    output[tid * 4 + 1] = y0;
    output[tid * 4 + 2] = x1;
    output[tid * 4 + 3] = y1;
}
// Launches anchorGenerator to fill `output` with nHeight * nWidth * 9
// anchor boxes (4 floats each). Returns STATUS_SUCCESS unconditionally.
extern "C" pluginStatus_t anchorGridTorch(hipStream_t stream, AnchorParamsTorch params, void *output)
{
    // One thread per anchor: 9 anchors at every feature-map cell.
    const int dims = params.featureSize.nHeight * params.featureSize.nWidth * 9;
    const int BS = 128;                   // threads per block
    const int GS = (dims + BS - 1) / BS;  // ceil-div grid size
    // Bug fix: the original launched on the default stream although the
    // caller passes an explicit stream; launch on `stream` so the plugin
    // composes correctly with asynchronous pipelines.
    hipLaunchKernelGGL(anchorGenerator, dim3(GS), dim3(BS), 0, stream,
                       params, (float *)output);
    return STATUS_SUCCESS;
} | anchorGeneratorTorch.cu | #include "kernel.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <stdio.h>
#include <iostream>
#include "anchorGeneratorTorch.h"
// Writes one (x0, y0, x1, y1) anchor box per thread into `output`.
// Each feature-map cell carries 9 base anchors, shifted by the cell's
// position times the stride. Expects a 1-D launch covering
// nHeight * nWidth * 9 threads; extra threads exit early.
__global__ void anchorGenerator(AnchorParamsTorch params, float *output)
{
    const int total = params.featureSize.nHeight * params.featureSize.nWidth * 9;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= total)
    {
        return;
    }
    const int cellIdx = tid / 9;    // which feature-map cell this anchor belongs to
    const int anchorIdx = tid % 9;  // which of the 9 base anchors at that cell
    const int col = cellIdx % params.featureSize.nWidth;  // cell column
    const int row = cellIdx / params.featureSize.nWidth;  // cell row
    // Translate the base anchor by the cell offset in input-image coordinates.
    const float x0 = params.fBaseAnchor[anchorIdx].x0 + col * params.nStride;
    const float y0 = params.fBaseAnchor[anchorIdx].y0 + row * params.nStride;
    const float x1 = params.fBaseAnchor[anchorIdx].x1 + col * params.nStride;
    const float y1 = params.fBaseAnchor[anchorIdx].y1 + row * params.nStride;
    float *dst = output + tid * 4;
    dst[0] = x0;
    dst[1] = y0;
    dst[2] = x1;
    dst[3] = y1;
}
// Launches anchorGenerator to fill `output` with nHeight * nWidth * 9
// anchor boxes (4 floats each). Returns STATUS_SUCCESS unconditionally.
extern "C" pluginStatus_t anchorGridTorch(cudaStream_t stream, AnchorParamsTorch params, void *output)
{
    // One thread per anchor: 9 anchors at every feature-map cell.
    const int dims = params.featureSize.nHeight * params.featureSize.nWidth * 9;
    const int BS = 128;                   // threads per block
    const int GS = (dims + BS - 1) / BS;  // ceil-div grid size
    // Bug fix: the original launched on the default stream although the
    // caller passes an explicit stream; launch on `stream` so the plugin
    // composes correctly with asynchronous pipelines.
    anchorGenerator<<<GS, BS, 0, stream>>>(params, (float *)output);
    return STATUS_SUCCESS;
} |
ed4c74aba26395c990ea6fceb20505d1733ed968.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include <set>
#include <vector>
#include "test_utils.h"
namespace raft {
namespace random {
// Terminology:
// SWoR - Sample Without Replacement
// Parameter pack for one Sample-Without-Replacement (SWoR) test case.
template <typename T>
struct SWoRInputs {
// population size and number of items to draw from it
int len, sampledLen;
// index whose weight is overridden with `largeWeight` (-1 disables the skew check)
int largeWeightIndex;
// dominant weight written at largeWeightIndex when it is >= 0
T largeWeight;
// which device random-number generator to exercise
GeneratorType gtype;
// RNG seed
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) {
return os;
}
// Fixture: draws `sampledLen` items without replacement from a weighted
// population of `len` random values on the GPU, then exposes the sampled
// indices on the host (h_outIdx) for the TEST_P bodies to validate.
template <typename T>
class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> {
protected:
// Allocates device buffers, fills the population and weights with uniform
// random data, optionally skews one weight, runs the weighted sampling, and
// copies the chosen indices back to the host.
void SetUp() override {
params = ::testing::TestWithParam<SWoRInputs<T>>::GetParam();
CUDA_CHECK(hipStreamCreate(&stream));
Rng r(params.seed, params.gtype);
allocate(in, params.len);
allocate(wts, params.len);
allocate(out, params.sampledLen);
allocate(outIdx, params.sampledLen);
h_outIdx.resize(params.sampledLen);
// population values in [-1, 1), weights in [1, 2) so every weight is positive
r.uniform(in, params.len, T(-1.0), T(1.0), stream);
r.uniform(wts, params.len, T(1.0), T(2.0), stream);
if (params.largeWeightIndex >= 0) {
// overwrite one weight with a dominant value to test skewed sampling
update_device(wts + params.largeWeightIndex, &params.largeWeight, 1,
stream);
}
r.sampleWithoutReplacement(handle, out, outIdx, in, wts, params.sampledLen,
params.len, stream);
// NOTE(review): this copy is issued on `stream`, but the TEST_P body reads
// h_outIdx before TearDown()'s hipStreamSynchronize — confirm update_host
// synchronizes the stream itself.
update_host(&(h_outIdx[0]), outIdx, params.sampledLen, stream);
}
// Drains the stream, then releases it and all device allocations.
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(wts));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(outIdx));
}
protected:
SWoRInputs<T> params;       // current test-case parameters
T *in, *out, *wts;          // device buffers: population, samples, weights
int* outIdx;                // device buffer of sampled indices
std::vector<int> h_outIdx;  // host copy of outIdx, checked by the tests
hipStream_t stream;
raft::handle_t handle;
};
typedef SWoRTest<float> SWoRTestF;
const std::vector<SWoRInputs<float>> inputsf = {
{1024, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.f, GenPhilox, 1234ULL},
{1024, 512, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 512, 10, 100000.f, GenTaps, 1234ULL},
{1024, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.f, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestF, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf));
typedef SWoRTest<double> SWoRTestD;
const std::vector<SWoRInputs<double>> inputsd = {
{1024, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.0, GenPhilox, 1234ULL},
{1024, 512, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 512, 10, 100000.0, GenTaps, 1234ULL},
{1024, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.0, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestD, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd));
} // namespace random
} // namespace raft
| ed4c74aba26395c990ea6fceb20505d1733ed968.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cuda_utils.cuh>
#include <random/rng.cuh>
#include <set>
#include <vector>
#include "test_utils.h"
namespace raft {
namespace random {
// Terminology:
// SWoR - Sample Without Replacement
// Parameter pack for one Sample-Without-Replacement (SWoR) test case.
template <typename T>
struct SWoRInputs {
// population size and number of items to draw from it
int len, sampledLen;
// index whose weight is overridden with `largeWeight` (-1 disables the skew check)
int largeWeightIndex;
// dominant weight written at largeWeightIndex when it is >= 0
T largeWeight;
// which device random-number generator to exercise
GeneratorType gtype;
// RNG seed
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const SWoRInputs<T>& dims) {
return os;
}
// Fixture: draws `sampledLen` items without replacement from a weighted
// population of `len` random values on the GPU, then exposes the sampled
// indices on the host (h_outIdx) for the TEST_P bodies to validate.
template <typename T>
class SWoRTest : public ::testing::TestWithParam<SWoRInputs<T>> {
protected:
// Allocates device buffers, fills the population and weights with uniform
// random data, optionally skews one weight, runs the weighted sampling, and
// copies the chosen indices back to the host.
void SetUp() override {
params = ::testing::TestWithParam<SWoRInputs<T>>::GetParam();
CUDA_CHECK(cudaStreamCreate(&stream));
Rng r(params.seed, params.gtype);
allocate(in, params.len);
allocate(wts, params.len);
allocate(out, params.sampledLen);
allocate(outIdx, params.sampledLen);
h_outIdx.resize(params.sampledLen);
// population values in [-1, 1), weights in [1, 2) so every weight is positive
r.uniform(in, params.len, T(-1.0), T(1.0), stream);
r.uniform(wts, params.len, T(1.0), T(2.0), stream);
if (params.largeWeightIndex >= 0) {
// overwrite one weight with a dominant value to test skewed sampling
update_device(wts + params.largeWeightIndex, &params.largeWeight, 1,
stream);
}
r.sampleWithoutReplacement(handle, out, outIdx, in, wts, params.sampledLen,
params.len, stream);
// NOTE(review): this copy is issued on `stream`, but the TEST_P body reads
// h_outIdx before TearDown()'s cudaStreamSynchronize — confirm update_host
// synchronizes the stream itself.
update_host(&(h_outIdx[0]), outIdx, params.sampledLen, stream);
}
// Drains the stream, then releases it and all device allocations.
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(wts));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(outIdx));
}
protected:
SWoRInputs<T> params;       // current test-case parameters
T *in, *out, *wts;          // device buffers: population, samples, weights
int* outIdx;                // device buffer of sampled indices
std::vector<int> h_outIdx;  // host copy of outIdx, checked by the tests
cudaStream_t stream;
raft::handle_t handle;
};
typedef SWoRTest<float> SWoRTestF;
const std::vector<SWoRInputs<float>> inputsf = {
{1024, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.f, GenPhilox, 1234ULL},
{1024, 512, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenTaps, 1234ULL},
{1024, 512, 10, 100000.f, GenTaps, 1234ULL},
{1024, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.f, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.f, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestF, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestF, ::testing::ValuesIn(inputsf));
typedef SWoRTest<double> SWoRTestD;
const std::vector<SWoRInputs<double>> inputsd = {
{1024, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenPhilox, 1234ULL},
{1024, 512, 10, 100000.0, GenPhilox, 1234ULL},
{1024, 512, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenTaps, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenTaps, 1234ULL},
{1024, 512, 10, 100000.0, GenTaps, 1234ULL},
{1024, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 1, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 1, 1024 - 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 1, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 512 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024 + 2, 1024 + 2, -1, 0.0, GenKiss99, 1234ULL},
{1024, 512, 10, 100000.0, GenKiss99, 1234ULL},
};
TEST_P(SWoRTestD, Result) {
std::set<int> occurence;
for (int i = 0; i < params.sampledLen; ++i) {
auto val = h_outIdx[i];
// indices must be in the given range
ASSERT_TRUE(0 <= val && val < params.len)
<< "out-of-range index @i=" << i << " val=" << val
<< " sampledLen=" << params.sampledLen;
// indices should not repeat
ASSERT_TRUE(occurence.find(val) == occurence.end())
<< "repeated index @i=" << i << " idx=" << val;
occurence.insert(val);
}
// if there's a skewed distribution, the top index should correspond to the
// particular item with a large weight
if (params.largeWeightIndex >= 0) {
ASSERT_EQ(h_outIdx[0], params.largeWeightIndex);
}
}
INSTANTIATE_TEST_CASE_P(SWoRTests, SWoRTestD, ::testing::ValuesIn(inputsd));
} // namespace random
} // namespace raft
|
f0b8d2b6f1584c58184549d6c5f9096d5bb08aef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Author: Sudnya Padalikar
// Date: 01/23/2014
// Brief: simple matrix multiplication kernel in cuda
#include <stdio.h>
#include <cassert>
#include <iostream>
// Kernel that executes on the CUDA device
// Naive GEMM: each thread computes one element of C = A * B.
// A is numARows x numAColumns, B is numBRows x numBColumns, all row-major.
// Expects a 2-D launch whose (y, x) grid covers (numCRows, numCColumns);
// out-of-range threads return immediately.
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numARows, int numAColumns,
                               int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= numCRows || col >= numCColumns) {
        return;
    }
    // Dot product of row `row` of A with column `col` of B.
    float acc = 0.0f;
    for (int k = 0; k < numBRows; ++k) {
        acc += A[row * numAColumns + k] * B[k * numBColumns + col];
    }
    C[row * numCColumns + col] = acc;
}
// Prints a row-major `rows` x `columns` matrix to stdout, one
// "[ v0 v1 ... ]" line per row, followed by a blank line.
void printMatrix(float* M, int rows, int columns) {
    for (int r = 0; r < rows; ++r) {
        const float* rowPtr = M + r * columns;
        std::cout << "[ ";
        for (int c = 0; c < columns; ++c) {
            std::cout << " " << rowPtr[c];
        }
        std::cout << " ]\n";
    }
    std::cout << "\n";
}
// Report-and-exit error check; usable only inside main() (it returns 1).
#define CHECK_HIP(call)                                                       \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "HIP error %s:%d: %s\n", __FILE__, __LINE__,      \
                    hipGetErrorString(err_));                                 \
            return 1;                                                         \
        }                                                                     \
    } while (0)

// Demo driver: builds a 3x4 and a 4x2 matrix, multiplies them on the GPU,
// and prints A, B and the 3x2 result. The original checked no HIP call;
// every allocation, copy and the launch is now verified.
int main()
{
    float *a_h, *b_h, *c_h;  // host matrices
    float *a_d, *b_d, *c_d;  // device matrices
    int numARows = 3;
    int numAColumns = 4;
    int numBRows = 4;
    int numBColumns = 2;
    int numCRows = 3;
    int numCColumns = 2;
    size_t a_bytes = numARows * numAColumns * sizeof(float);
    size_t b_bytes = numBRows * numBColumns * sizeof(float);
    size_t c_bytes = numCRows * numCColumns * sizeof(float);
    // C = A * B needs matching inner dimensions and C shaped (rowsA x colsB).
    assert(numAColumns == numBRows);
    assert(numARows == numCRows);
    assert(numBColumns == numCColumns);
    a_h = (float *)malloc(a_bytes);
    b_h = (float *)malloc(b_bytes);
    c_h = (float *)malloc(c_bytes);
    assert(a_h != NULL && b_h != NULL && c_h != NULL);
    CHECK_HIP(hipMalloc((void **)&a_d, a_bytes));
    CHECK_HIP(hipMalloc((void **)&b_d, b_bytes));
    CHECK_HIP(hipMalloc((void **)&c_d, c_bytes));
    // Fill A and B with their row-major linear index so the product is easy
    // to verify by eye.
    for (int i = 0; i < numARows; ++i)
        for (int j = 0; j < numAColumns; ++j)
            a_h[(i * numAColumns) + j] = (i * numAColumns) + j;
    printMatrix(a_h, numARows, numAColumns);
    for (int i = 0; i < numBRows; ++i)
        for (int j = 0; j < numBColumns; ++j)
            b_h[(i * numBColumns) + j] = (i * numBColumns) + j;
    printMatrix(b_h, numBRows, numBColumns);
    CHECK_HIP(hipMemcpy(a_d, a_h, a_bytes, hipMemcpyHostToDevice));
    CHECK_HIP(hipMemcpy(b_d, b_h, b_bytes, hipMemcpyHostToDevice));
    // One 16x16 thread block per 16x16 output tile, rounded up.
    dim3 block_size = dim3(16, 16, 1);
    dim3 num_blocks = dim3((numCColumns + 16 - 1) / 16, (numCRows + 16 - 1) / 16);
    hipLaunchKernelGGL(matrixMultiply, num_blocks, block_size, 0, 0,
                       a_d, b_d, c_d, numARows, numAColumns,
                       numBRows, numBColumns, numCRows, numCColumns);
    CHECK_HIP(hipGetLastError());  // catch launch-configuration errors
    // The blocking device-to-host copy also synchronizes with the kernel.
    CHECK_HIP(hipMemcpy(c_h, c_d, c_bytes, hipMemcpyDeviceToHost));
    printMatrix(c_h, numCRows, numCColumns);
    free(a_h);
    free(b_h);
    free(c_h);
    CHECK_HIP(hipFree(a_d));
    CHECK_HIP(hipFree(b_d));
    CHECK_HIP(hipFree(c_d));
    return 0;
}
| f0b8d2b6f1584c58184549d6c5f9096d5bb08aef.cu | // Author: Sudnya Padalikar
// Date: 01/23/2014
// Brief: simple matrix multiplication kernel in cuda
#include <stdio.h>
#include <cassert>
#include <iostream>
// Kernel that executes on the CUDA device
// Naive GEMM: each thread computes one element of C = A * B.
// A is numARows x numAColumns, B is numBRows x numBColumns, all row-major.
// Expects a 2-D launch whose (y, x) grid covers (numCRows, numCColumns);
// out-of-range threads return immediately.
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numARows, int numAColumns,
                               int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= numCRows || col >= numCColumns) {
        return;
    }
    // Dot product of row `row` of A with column `col` of B.
    float acc = 0.0f;
    for (int k = 0; k < numBRows; ++k) {
        acc += A[row * numAColumns + k] * B[k * numBColumns + col];
    }
    C[row * numCColumns + col] = acc;
}
// Prints a row-major `rows` x `columns` matrix to stdout, one
// "[ v0 v1 ... ]" line per row, followed by a blank line.
void printMatrix(float* M, int rows, int columns) {
    for (int r = 0; r < rows; ++r) {
        const float* rowPtr = M + r * columns;
        std::cout << "[ ";
        for (int c = 0; c < columns; ++c) {
            std::cout << " " << rowPtr[c];
        }
        std::cout << " ]\n";
    }
    std::cout << "\n";
}
// Report-and-exit error check; usable only inside main() (it returns 1).
#define CHECK_CUDA(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            return 1;                                                         \
        }                                                                     \
    } while (0)

// Demo driver: builds a 3x4 and a 4x2 matrix, multiplies them on the GPU,
// and prints A, B and the 3x2 result. The original checked no CUDA call;
// every allocation, copy and the launch is now verified.
int main()
{
    float *a_h, *b_h, *c_h;  // host matrices
    float *a_d, *b_d, *c_d;  // device matrices
    int numARows = 3;
    int numAColumns = 4;
    int numBRows = 4;
    int numBColumns = 2;
    int numCRows = 3;
    int numCColumns = 2;
    size_t a_bytes = numARows * numAColumns * sizeof(float);
    size_t b_bytes = numBRows * numBColumns * sizeof(float);
    size_t c_bytes = numCRows * numCColumns * sizeof(float);
    // C = A * B needs matching inner dimensions and C shaped (rowsA x colsB).
    assert(numAColumns == numBRows);
    assert(numARows == numCRows);
    assert(numBColumns == numCColumns);
    a_h = (float *)malloc(a_bytes);
    b_h = (float *)malloc(b_bytes);
    c_h = (float *)malloc(c_bytes);
    assert(a_h != NULL && b_h != NULL && c_h != NULL);
    CHECK_CUDA(cudaMalloc((void **)&a_d, a_bytes));
    CHECK_CUDA(cudaMalloc((void **)&b_d, b_bytes));
    CHECK_CUDA(cudaMalloc((void **)&c_d, c_bytes));
    // Fill A and B with their row-major linear index so the product is easy
    // to verify by eye.
    for (int i = 0; i < numARows; ++i)
        for (int j = 0; j < numAColumns; ++j)
            a_h[(i * numAColumns) + j] = (i * numAColumns) + j;
    printMatrix(a_h, numARows, numAColumns);
    for (int i = 0; i < numBRows; ++i)
        for (int j = 0; j < numBColumns; ++j)
            b_h[(i * numBColumns) + j] = (i * numBColumns) + j;
    printMatrix(b_h, numBRows, numBColumns);
    CHECK_CUDA(cudaMemcpy(a_d, a_h, a_bytes, cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(b_d, b_h, b_bytes, cudaMemcpyHostToDevice));
    // One 16x16 thread block per 16x16 output tile, rounded up.
    dim3 block_size = dim3(16, 16, 1);
    dim3 num_blocks = dim3((numCColumns + 16 - 1) / 16, (numCRows + 16 - 1) / 16);
    matrixMultiply<<<num_blocks, block_size>>>(a_d, b_d, c_d,
                                               numARows, numAColumns,
                                               numBRows, numBColumns,
                                               numCRows, numCColumns);
    CHECK_CUDA(cudaGetLastError());  // catch launch-configuration errors
    // The blocking device-to-host copy also synchronizes with the kernel.
    CHECK_CUDA(cudaMemcpy(c_h, c_d, c_bytes, cudaMemcpyDeviceToHost));
    printMatrix(c_h, numCRows, numCColumns);
    free(a_h);
    free(b_h);
    free(c_h);
    CHECK_CUDA(cudaFree(a_d));
    CHECK_CUDA(cudaFree(b_d));
    CHECK_CUDA(cudaFree(c_d));
    return 0;
}
|
fcf8d2df4a910473170e69c1d2d53a18b11bbcc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Tensor-contraction update kernel: subtracts t2 (x) v2 from t3, i.e.
// t3[h3,h2,h1,p6,p4] -= t2[h1,p4] * v2[h3,h2,p6], with all strides
// (the *ld_* parameters) supplied by the caller.
// The shared tile t2_shm is sized by macros T1/T2/Tcomm defined elsewhere;
// assumes h1d * p4d <= T1 * 4 * Tcomm — TODO confirm against the launcher.
// NOTE(review): the parameter `p4` is immediately clobbered by the inner
// loop and acts only as scratch.
__global__ void sd_t_s1_2_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t2_d, double *v2_d,size_t p4, size_t total_x, double* t3d) {
size_t h1,h2,h3,p6;
__shared__ double t2_shm[T1*4*Tcomm];
// Cooperatively stage the whole t2 block into shared memory.
// (The inner `if` repeats the loop bound and is redundant.)
for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
size_t rest_x=blockIdx.x;
size_t thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
// NOTE(review): rest_x is accumulated onto a value already divided down by
// the previous iteration, so the (h3, h2, p6) decomposition is only sound
// when the grid covers total_x in a single iteration — confirm the launch
// configuration guarantees this.
rest_x += i;
// Decompose the flat index into (h3, h2, p6) coordinates.
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
// Accumulate over the (h1, p4) contraction indices.
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
} | fcf8d2df4a910473170e69c1d2d53a18b11bbcc1.cu | #include "includes.h"
// Tensor-contraction update kernel: subtracts t2 (x) v2 from t3, i.e.
// t3[h3,h2,h1,p6,p4] -= t2[h1,p4] * v2[h3,h2,p6], with all strides
// (the *ld_* parameters) supplied by the caller.
// The shared tile t2_shm is sized by macros T1/T2/Tcomm defined elsewhere;
// assumes h1d * p4d <= T1 * 4 * Tcomm — TODO confirm against the launcher.
// NOTE(review): the parameter `p4` is immediately clobbered by the inner
// loop and acts only as scratch.
__global__ void sd_t_s1_2_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t2_d, double *v2_d,size_t p4, size_t total_x, double* t3d) {
size_t h1,h2,h3,p6;
__shared__ double t2_shm[T1*4*Tcomm];
// Cooperatively stage the whole t2 block into shared memory.
// (The inner `if` repeats the loop bound and is redundant.)
for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
size_t rest_x=blockIdx.x;
size_t thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
// NOTE(review): rest_x is accumulated onto a value already divided down by
// the previous iteration, so the (h3, h2, p6) decomposition is only sound
// when the grid covers total_x in a single iteration — confirm the launch
// configuration guarantees this.
rest_x += i;
// Decompose the flat index into (h3, h2, p6) coordinates.
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
// Accumulate over the (h1, p4) contraction indices.
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
103318074021ed9df450ad97f7f71d33d5653016.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
// #define SIZE 1000
#define SIZE 10
// Parallel maximum of a[0..blockDim.x-1] into *c.
// Assumes a single-block launch (max<<<1, SIZE>>>), so __syncthreads()
// covers every participating thread.
__global__ void max(int *a , int *c)
{
    int i = threadIdx.x;
    // Bug fix: the original had every thread race on
    // "*c = a[0]; if (a[i] > *c) *c = a[i];", which is nondeterministic.
    // Seed *c exactly once, barrier, then reduce with atomicMax.
    if (i == 0)
    {
        *c = a[0];
    }
    __syncthreads();
    atomicMax(c, a[i]);
}
// Demo driver: finds the maximum of a small array on the GPU and prints it.
int main()
{
    // SIZE is 10 but only 9 initializers are given; a[9] is zero-initialized
    // by aggregate-initialization rules.
    int a[SIZE] = {12, 4, 7, 3, 9, 5, 11, 6, 1};
    int c = 0;
    int *dev_a, *dev_c;
    // Device buffers: SIZE ints for the input, a single int for the result.
    if (hipMalloc((void **)&dev_a, SIZE * sizeof(int)) != hipSuccess ||
        hipMalloc((void **)&dev_c, sizeof(int)) != hipSuccess)
    {
        printf("hipMalloc failed\n");
        return 1;
    }
    hipMemcpy(dev_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
    // One block of SIZE threads; the kernel relies on a single-block launch.
    hipLaunchKernelGGL(max, dim3(1), dim3(SIZE), 0, 0, dev_a, dev_c);
    // Bug fix: the original copied SIZE*sizeof(int) bytes into the single
    // int `c`, overflowing the stack variable. Copy exactly one int; the
    // blocking copy also synchronizes with the kernel.
    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
    printf("\nmax = %d ", c);
    // Bug fix: dev_a was leaked (its hipFree had been mangled into a comment).
    hipFree(dev_a);
    hipFree(dev_c);
    return 0;
}
| 103318074021ed9df450ad97f7f71d33d5653016.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
// #define SIZE 1000
#define SIZE 10
// Parallel maximum of a[0..blockDim.x-1] into *c.
// Assumes a single-block launch (max<<<1, SIZE>>>), so __syncthreads()
// covers every participating thread.
__global__ void max(int *a , int *c)
{
    int i = threadIdx.x;
    // Bug fix: the original had every thread race on
    // "*c = a[0]; if (a[i] > *c) *c = a[i];", which is nondeterministic.
    // Seed *c exactly once, barrier, then reduce with atomicMax.
    if (i == 0)
    {
        *c = a[0];
    }
    __syncthreads();
    atomicMax(c, a[i]);
}
// Demo driver: finds the maximum of a small array on the GPU and prints it.
int main()
{
    // SIZE is 10 but only 9 initializers are given; a[9] is zero-initialized
    // by aggregate-initialization rules.
    int a[SIZE] = {12, 4, 7, 3, 9, 5, 11, 6, 1};
    int c = 0;
    int *dev_a, *dev_c;
    // Device buffers: SIZE ints for the input, a single int for the result.
    if (cudaMalloc((void **)&dev_a, SIZE * sizeof(int)) != cudaSuccess ||
        cudaMalloc((void **)&dev_c, sizeof(int)) != cudaSuccess)
    {
        printf("cudaMalloc failed\n");
        return 1;
    }
    cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    // One block of SIZE threads; the kernel relies on a single-block launch.
    max<<<1, SIZE>>>(dev_a, dev_c);
    // Bug fix: the original copied SIZE*sizeof(int) bytes into the single
    // int `c`, overflowing the stack variable. Copy exactly one int; the
    // blocking copy also synchronizes with the kernel.
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nmax = %d ", c);
    // Bug fix: dev_a was leaked (its cudaFree had been mangled into a comment).
    cudaFree(dev_a);
    cudaFree(dev_c);
    return 0;
}
|
756a8926ef3db07bbb8891cecf38f1be5ebcd1b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDALOCKSSEMAPHORESPIN_CU__
#define __CUDALOCKSSEMAPHORESPIN_CU__
#include "cudaLocks.h"
// Host-side initialization of spin semaphore `semaphoreNumber`.
// Writes the per-SM 4-word records {current count, head/lock, tail, max}
// and returns the semaphore id through *handle.
inline __host__ hipError_t cudaSemaphoreCreateSpin(cudaSemaphore_t * const handle,
                                                   const int semaphoreNumber,
                                                   const unsigned int count,
                                                   const int NUM_SM)
{
  /*
    The stored value is count+1, which lets the same word serve as both a
    lock and a semaphore: an atomicExch(sem, 0) acquires and empties it.
  */
  const unsigned int initialValue = count + 1;
  *handle = semaphoreNumber;
  // Every SM gets its own 4-slot record for this semaphore.
  for (int id = 0; id < NUM_SM; ++id) {
    unsigned int * const slot =
        cpuLockData->semaphoreBuffers + (semaphoreNumber * 4 * NUM_SM) + (id * 4);
    slot[0] = initialValue;  // current count
    slot[1] = 0;             // head (used as the "lock")
    slot[2] = 0;             // tail (used as the "writer waiting" flag)
    slot[3] = initialValue;  // max count
  }
  return hipSuccess;
}
// Non-blocking acquire of the device-wide spin semaphore `sem`.
// Must be reached by EVERY thread of the block (contains __syncthreads()).
// Returns true iff this thread block was admitted: a writer requires the
// full count (exclusive access); a reader requires count > 1 and that no
// writer has flagged itself as waiting.
inline __device__ bool cudaSemaphoreSpinTryWait(const cudaSemaphore_t sem,
                                                const bool isWriter,
                                                const unsigned int maxSemCount,
                                                unsigned int * semaphoreBuffers,
                                                const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
                               threadIdx.z == 0);
  /*
    Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
    SM uses 4 of them (current count, head, tail, max count). For the global
    semaphore all SMs use semaphoreBuffers[sem * 4 * NUM_SM].
  */
  unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
  unsigned int * const lock = currCount + 1;
  /*
    Reuse the tail for the "writers are waiting" flag since tail is unused.
    For now just use to indicate that at least 1 writer is waiting instead of
    a count to make sure that readers aren't totally starved out until all the
    writers are done.
  */
  unsigned int * const writerWaiting = currCount + 2;
  __shared__ bool acq1, acq2;  // block-wide outcomes of the two acquire steps
  __syncthreads();
  if (isMasterThread)
  {
    acq1 = false;
    // try to acquire the sem head "lock"
    if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; }
  }
  __syncthreads();
  if (!acq1) { return false; } // return if we couldn't acquire the lock
  if (isMasterThread)
  {
    acq2 = false;
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    unsigned int currSemCount = currCount[0];
    if (isWriter) {
      // writer needs the count to be == maxSemCount to enter the critical
      // section (otherwise there are readers in the critical section)
      if (currSemCount == maxSemCount) { acq2 = true; }
    } else {
      // if there is a writer waiting, readers aren't allowed to enter the
      // critical section
      if (writerWaiting[0] == 0) {
        // readers need count > 1 to enter critical section (otherwise semaphore
        // is full)
        if (currSemCount > 1) { acq2 = true; }
      }
    }
  }
  __syncthreads();
  if (!acq2) // release the sem head "lock" since the semaphore was full
  {
    // writers set a flag to note that they are waiting so more readers don't
    // join after the writer started waiting
    // NOTE(review): this plain store is executed by every thread of a writer
    // block while the head lock is NOT held — presumably benign since all
    // threads write the same value 1; confirm against the memory model.
    if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
    if (isMasterThread) { atomicExch(lock, 0); }
    __syncthreads();
    return false;
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      /*
        writer decrements the current count of the semaphore by the max to
        ensure that no one else can enter the critical section while it's
        writing.
      */
      currCount[0] -= maxSemCount;
      // writers also need to unset the "writer is waiting" flag
      writerWaiting[0] = 0;
    } else {
      // readers decrement the current count of the semaphore by 1 so other
      // readers can also read the data (but not the writers since they needs
      // the entire CS).
      --currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
  return true;
}
// Blocking acquire of the device-wide spin semaphore: keep retrying the
// non-blocking attempt until it succeeds, re-synchronizing the thread
// block between attempts. Must be reached by every thread of the block.
inline __device__ void cudaSemaphoreSpinWait(const cudaSemaphore_t sem,
                                             const bool isWriter,
                                             const unsigned int maxSemCount,
                                             unsigned int * semaphoreBuffers,
                                             const int NUM_SM)
{
  for (;;)
  {
    if (cudaSemaphoreSpinTryWait(sem, isWriter, maxSemCount, semaphoreBuffers,
                                 NUM_SM))
      break;
    __syncthreads();
  }
}
// Release ("post") the device-wide spin semaphore: spin until the head lock
// is taken, then add maxSemCount (writer) or 1 (reader) back to the current
// count. Must be reached by every thread of the block (uses __syncthreads()).
inline __device__ void cudaSemaphoreSpinPost(const cudaSemaphore_t sem,
                                             const bool isWriter,
                                             const unsigned int maxSemCount,
                                             unsigned int * semaphoreBuffers,
                                             const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
                               threadIdx.z == 0);
  /*
    Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
    SM uses 4 of them (current count, head, tail, max count). For the global
    semaphore use semaphoreBuffers[sem * 4 * NUM_SM].
  */
  unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
  unsigned int * const lock = currCount + 1;
  __shared__ bool acquired;  // block-wide "we hold the head lock" flag
  if (isMasterThread) { acquired = false; }
  __syncthreads();
  while (!acquired)
  {
    if (isMasterThread)
    {
      /*
        NOTE: This CAS will trigger an invalidation since we overload CAS's.
        Since most of the data in the local critical section is written, it
        hopefully won't affect performance too much.
      */
      // try to acquire sem head lock
      if (atomicCAS(lock, 0, 1) == 1) { acquired = false; }
      else { acquired = true; }
    }
    __syncthreads();
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      // writers add the max value to the semaphore to allow the readers to
      // start accessing the critical section.
      currCount[0] += maxSemCount;
    } else {
      // readers add 1 to the semaphore
      ++currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
}
// same wait algorithm but with local scope and per-SM synchronization
// Non-blocking acquire of the per-SM ("local") spin semaphore instance
// selected by smID. Same protocol as cudaSemaphoreSpinTryWait, but operates
// on this SM's private 4-word record. Must be reached by every thread of the
// block (contains __syncthreads()).
inline __device__ bool cudaSemaphoreSpinTryWaitLocal(const cudaSemaphore_t sem,
                                                     const unsigned int smID,
                                                     const bool isWriter,
                                                     const unsigned int maxSemCount,
                                                     unsigned int * semaphoreBuffers,
                                                     const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
                               threadIdx.z == 0);
  // Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
  // SM gets 4 of them (current count, head, tail, max count). So SM 0 starts
  // at semaphoreBuffers[sem * 4 * NUM_SM].
  unsigned int * const currCount = semaphoreBuffers +
                                   ((sem * 4 * NUM_SM) + (smID * 4));
  unsigned int * const lock = currCount + 1;
  /*
    Reuse the tail for the "writers are waiting" flag since tail is unused.
    For now just use to indicate that at least 1 writer is waiting instead of
    a count to make sure that readers aren't totally starved out until all the
    writers are done.
  */
  unsigned int * const writerWaiting = currCount + 2;
  __shared__ bool acq1, acq2;  // block-wide outcomes of the two acquire steps
  __syncthreads();
  if (isMasterThread)
  {
    acq1 = false;
    // try to acquire the sem head "lock"
    if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; }
  }
  __syncthreads();
  if (!acq1) { return false; } // return if we couldn't acquire the lock
  if (isMasterThread)
  {
    acq2 = false;
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    unsigned int currSemCount = currCount[0];
    if (isWriter) {
      // writer needs the count to be == maxSemCount to enter the critical
      // section (otherwise there are readers in the critical section)
      if (currSemCount == maxSemCount) { acq2 = true; }
    } else {
      // if there is a writer waiting, readers aren't allowed to enter the
      // critical section
      if (writerWaiting[0] == 0) {
        // readers need count > 1 to enter critical section (otherwise semaphore
        // is full)
        if (currSemCount > 1) { acq2 = true; }
      }
    }
  }
  __syncthreads();
  if (!acq2) // release the sem head "lock" since the semaphore was full
  {
    // writers set a flag to note that they are waiting so more readers don't
    // join after the writer started waiting
    // NOTE(review): this plain store is executed by every thread of a writer
    // block while the head lock is NOT held — presumably benign since all
    // threads write the same value 1; confirm against the memory model.
    if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
    if (isMasterThread) { atomicExch(lock, 0); }
    __syncthreads();
    return false;
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      /*
        writer decrements the current count of the semaphore by the max to
        ensure that no one else can enter the critical section while it's
        writing.
      */
      currCount[0] -= maxSemCount;
      // writers also need to unset the "writer is waiting" flag
      writerWaiting[0] = 0;
    } else {
      /*
        readers decrement the current count of the semaphore by 1 so other
        readers can also read the data (but not the writers since they needs
        the entire CS).
      */
      --currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
  return true;
}
// Blocking acquire of the per-SM spin semaphore instance: keep retrying the
// non-blocking attempt until it succeeds, re-synchronizing the thread block
// between attempts. Must be reached by every thread of the block.
inline __device__ void cudaSemaphoreSpinWaitLocal(const cudaSemaphore_t sem,
                                                  const unsigned int smID,
                                                  const bool isWriter,
                                                  const unsigned int maxSemCount,
                                                  unsigned int * semaphoreBuffers,
                                                  const int NUM_SM)
{
  for (;;)
  {
    if (cudaSemaphoreSpinTryWaitLocal(sem, smID, isWriter, maxSemCount,
                                      semaphoreBuffers, NUM_SM))
      break;
    __syncthreads();
  }
}
// Release ("post") the per-SM spin semaphore instance selected by smID:
// spin until this SM's head lock is taken, then add maxSemCount (writer) or
// 1 (reader) back to its current count. Must be reached by every thread of
// the block (uses __syncthreads()).
inline __device__ void cudaSemaphoreSpinPostLocal(const cudaSemaphore_t sem,
                                                  const unsigned int smID,
                                                  const bool isWriter,
                                                  const unsigned int maxSemCount,
                                                  unsigned int * semaphoreBuffers,
                                                  const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
  // Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
  // SM gets 4 of them. So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM].
  unsigned int * const currCount = semaphoreBuffers +
                                   ((sem * 4 * NUM_SM) + (smID * 4));
  unsigned int * const lock = currCount + 1;
  __shared__ bool acquired;  // block-wide "we hold the head lock" flag
  if (isMasterThread) { acquired = false; }
  __syncthreads();
  while (!acquired)
  {
    if (isMasterThread)
    {
      // try to acquire sem head lock
      if (atomicCAS(lock, 0, 1) == 1) { acquired = false; }
      else { acquired = true; }
    }
    __syncthreads();
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      // writers add the max value to the semaphore to allow the readers to
      // start accessing the critical section.
      currCount[0] += maxSemCount;
    } else {
      // readers add 1 to the semaphore
      ++currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
}
#endif // #ifndef __CUDALOCKSSEMAPHORESPIN_CU__
| 756a8926ef3db07bbb8891cecf38f1be5ebcd1b0.cu | #ifndef __CUDALOCKSSEMAPHORESPIN_CU__
#define __CUDALOCKSSEMAPHORESPIN_CU__
#include "cudaLocks.h"
// Host-side initialization of spin semaphore `semaphoreNumber`.
// Writes the per-SM 4-word records {current count, head/lock, tail, max}
// and returns the semaphore id through *handle.
inline __host__ cudaError_t cudaSemaphoreCreateSpin(cudaSemaphore_t * const handle,
                                                    const int semaphoreNumber,
                                                    const unsigned int count,
                                                    const int NUM_SM)
{
  /*
    The stored value is count+1, which lets the same word serve as both a
    lock and a semaphore: an atomicExch(sem, 0) acquires and empties it.
  */
  const unsigned int initialValue = count + 1;
  *handle = semaphoreNumber;
  // Every SM gets its own 4-slot record for this semaphore.
  for (int id = 0; id < NUM_SM; ++id) {
    unsigned int * const slot =
        cpuLockData->semaphoreBuffers + (semaphoreNumber * 4 * NUM_SM) + (id * 4);
    slot[0] = initialValue;  // current count
    slot[1] = 0;             // head (used as the "lock")
    slot[2] = 0;             // tail (used as the "writer waiting" flag)
    slot[3] = initialValue;  // max count
  }
  return cudaSuccess;
}
// Non-blocking acquire of the device-wide spin semaphore `sem`.
// Must be reached by EVERY thread of the block (contains __syncthreads()).
// Returns true iff this thread block was admitted: a writer requires the
// full count (exclusive access); a reader requires count > 1 and that no
// writer has flagged itself as waiting.
inline __device__ bool cudaSemaphoreSpinTryWait(const cudaSemaphore_t sem,
                                                const bool isWriter,
                                                const unsigned int maxSemCount,
                                                unsigned int * semaphoreBuffers,
                                                const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
                               threadIdx.z == 0);
  /*
    Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
    SM uses 4 of them (current count, head, tail, max count). For the global
    semaphore all SMs use semaphoreBuffers[sem * 4 * NUM_SM].
  */
  unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
  unsigned int * const lock = currCount + 1;
  /*
    Reuse the tail for the "writers are waiting" flag since tail is unused.
    For now just use to indicate that at least 1 writer is waiting instead of
    a count to make sure that readers aren't totally starved out until all the
    writers are done.
  */
  unsigned int * const writerWaiting = currCount + 2;
  __shared__ bool acq1, acq2;  // block-wide outcomes of the two acquire steps
  __syncthreads();
  if (isMasterThread)
  {
    acq1 = false;
    // try to acquire the sem head "lock"
    if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; }
  }
  __syncthreads();
  if (!acq1) { return false; } // return if we couldn't acquire the lock
  if (isMasterThread)
  {
    acq2 = false;
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    unsigned int currSemCount = currCount[0];
    if (isWriter) {
      // writer needs the count to be == maxSemCount to enter the critical
      // section (otherwise there are readers in the critical section)
      if (currSemCount == maxSemCount) { acq2 = true; }
    } else {
      // if there is a writer waiting, readers aren't allowed to enter the
      // critical section
      if (writerWaiting[0] == 0) {
        // readers need count > 1 to enter critical section (otherwise semaphore
        // is full)
        if (currSemCount > 1) { acq2 = true; }
      }
    }
  }
  __syncthreads();
  if (!acq2) // release the sem head "lock" since the semaphore was full
  {
    // writers set a flag to note that they are waiting so more readers don't
    // join after the writer started waiting
    // NOTE(review): this plain store is executed by every thread of a writer
    // block while the head lock is NOT held — presumably benign since all
    // threads write the same value 1; confirm against the memory model.
    if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
    if (isMasterThread) { atomicExch(lock, 0); }
    __syncthreads();
    return false;
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      /*
        writer decrements the current count of the semaphore by the max to
        ensure that no one else can enter the critical section while it's
        writing.
      */
      currCount[0] -= maxSemCount;
      // writers also need to unset the "writer is waiting" flag
      writerWaiting[0] = 0;
    } else {
      // readers decrement the current count of the semaphore by 1 so other
      // readers can also read the data (but not the writers since they needs
      // the entire CS).
      --currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
  return true;
}
// Blocking acquire of the device-wide spin semaphore: keep retrying the
// non-blocking attempt until it succeeds, re-synchronizing the thread
// block between attempts. Must be reached by every thread of the block.
inline __device__ void cudaSemaphoreSpinWait(const cudaSemaphore_t sem,
                                             const bool isWriter,
                                             const unsigned int maxSemCount,
                                             unsigned int * semaphoreBuffers,
                                             const int NUM_SM)
{
  for (;;)
  {
    if (cudaSemaphoreSpinTryWait(sem, isWriter, maxSemCount, semaphoreBuffers,
                                 NUM_SM))
      break;
    __syncthreads();
  }
}
// Release ("post") the device-wide spin semaphore: spin until the head lock
// is taken, then add maxSemCount (writer) or 1 (reader) back to the current
// count. Must be reached by every thread of the block (uses __syncthreads()).
inline __device__ void cudaSemaphoreSpinPost(const cudaSemaphore_t sem,
                                             const bool isWriter,
                                             const unsigned int maxSemCount,
                                             unsigned int * semaphoreBuffers,
                                             const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
                               threadIdx.z == 0);
  /*
    Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
    SM uses 4 of them (current count, head, tail, max count). For the global
    semaphore use semaphoreBuffers[sem * 4 * NUM_SM].
  */
  unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
  unsigned int * const lock = currCount + 1;
  __shared__ bool acquired;  // block-wide "we hold the head lock" flag
  if (isMasterThread) { acquired = false; }
  __syncthreads();
  while (!acquired)
  {
    if (isMasterThread)
    {
      /*
        NOTE: This CAS will trigger an invalidation since we overload CAS's.
        Since most of the data in the local critical section is written, it
        hopefully won't affect performance too much.
      */
      // try to acquire sem head lock
      if (atomicCAS(lock, 0, 1) == 1) { acquired = false; }
      else { acquired = true; }
    }
    __syncthreads();
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      // writers add the max value to the semaphore to allow the readers to
      // start accessing the critical section.
      currCount[0] += maxSemCount;
    } else {
      // readers add 1 to the semaphore
      ++currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
}
// same wait algorithm but with local scope and per-SM synchronization
// Non-blocking acquire of the per-SM ("local") spin semaphore instance
// selected by smID. Same protocol as cudaSemaphoreSpinTryWait, but operates
// on this SM's private 4-word record. Must be reached by every thread of the
// block (contains __syncthreads()).
inline __device__ bool cudaSemaphoreSpinTryWaitLocal(const cudaSemaphore_t sem,
                                                     const unsigned int smID,
                                                     const bool isWriter,
                                                     const unsigned int maxSemCount,
                                                     unsigned int * semaphoreBuffers,
                                                     const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
                               threadIdx.z == 0);
  // Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
  // SM gets 4 of them (current count, head, tail, max count). So SM 0 starts
  // at semaphoreBuffers[sem * 4 * NUM_SM].
  unsigned int * const currCount = semaphoreBuffers +
                                   ((sem * 4 * NUM_SM) + (smID * 4));
  unsigned int * const lock = currCount + 1;
  /*
    Reuse the tail for the "writers are waiting" flag since tail is unused.
    For now just use to indicate that at least 1 writer is waiting instead of
    a count to make sure that readers aren't totally starved out until all the
    writers are done.
  */
  unsigned int * const writerWaiting = currCount + 2;
  __shared__ bool acq1, acq2;  // block-wide outcomes of the two acquire steps
  __syncthreads();
  if (isMasterThread)
  {
    acq1 = false;
    // try to acquire the sem head "lock"
    if (atomicCAS(lock, 0, 1) == 0) { acq1 = true; }
  }
  __syncthreads();
  if (!acq1) { return false; } // return if we couldn't acquire the lock
  if (isMasterThread)
  {
    acq2 = false;
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    unsigned int currSemCount = currCount[0];
    if (isWriter) {
      // writer needs the count to be == maxSemCount to enter the critical
      // section (otherwise there are readers in the critical section)
      if (currSemCount == maxSemCount) { acq2 = true; }
    } else {
      // if there is a writer waiting, readers aren't allowed to enter the
      // critical section
      if (writerWaiting[0] == 0) {
        // readers need count > 1 to enter critical section (otherwise semaphore
        // is full)
        if (currSemCount > 1) { acq2 = true; }
      }
    }
  }
  __syncthreads();
  if (!acq2) // release the sem head "lock" since the semaphore was full
  {
    // writers set a flag to note that they are waiting so more readers don't
    // join after the writer started waiting
    // NOTE(review): this plain store is executed by every thread of a writer
    // block while the head lock is NOT held — presumably benign since all
    // threads write the same value 1; confirm against the memory model.
    if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
    if (isMasterThread) { atomicExch(lock, 0); }
    __syncthreads();
    return false;
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      /*
        writer decrements the current count of the semaphore by the max to
        ensure that no one else can enter the critical section while it's
        writing.
      */
      currCount[0] -= maxSemCount;
      // writers also need to unset the "writer is waiting" flag
      writerWaiting[0] = 0;
    } else {
      /*
        readers decrement the current count of the semaphore by 1 so other
        readers can also read the data (but not the writers since they needs
        the entire CS).
      */
      --currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
  return true;
}
// Blocking acquire of the per-SM spin semaphore instance: keep retrying the
// non-blocking attempt until it succeeds, re-synchronizing the thread block
// between attempts. Must be reached by every thread of the block.
inline __device__ void cudaSemaphoreSpinWaitLocal(const cudaSemaphore_t sem,
                                                  const unsigned int smID,
                                                  const bool isWriter,
                                                  const unsigned int maxSemCount,
                                                  unsigned int * semaphoreBuffers,
                                                  const int NUM_SM)
{
  for (;;)
  {
    if (cudaSemaphoreSpinTryWaitLocal(sem, smID, isWriter, maxSemCount,
                                      semaphoreBuffers, NUM_SM))
      break;
    __syncthreads();
  }
}
// Release ("post") the per-SM spin semaphore instance selected by smID:
// spin until this SM's head lock is taken, then add maxSemCount (writer) or
// 1 (reader) back to its current count. Must be reached by every thread of
// the block (uses __syncthreads()).
inline __device__ void cudaSemaphoreSpinPostLocal(const cudaSemaphore_t sem,
                                                  const unsigned int smID,
                                                  const bool isWriter,
                                                  const unsigned int maxSemCount,
                                                  unsigned int * semaphoreBuffers,
                                                  const int NUM_SM)
{
  // Only one representative thread per block touches the shared counters.
  bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
  // Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
  // SM gets 4 of them. So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM].
  unsigned int * const currCount = semaphoreBuffers +
                                   ((sem * 4 * NUM_SM) + (smID * 4));
  unsigned int * const lock = currCount + 1;
  __shared__ bool acquired;  // block-wide "we hold the head lock" flag
  if (isMasterThread) { acquired = false; }
  __syncthreads();
  while (!acquired)
  {
    if (isMasterThread)
    {
      // try to acquire sem head lock
      if (atomicCAS(lock, 0, 1) == 1) { acquired = false; }
      else { acquired = true; }
    }
    __syncthreads();
  }
  __syncthreads();
  if (isMasterThread) {
    /*
      NOTE: currCount is only accessed by 1 TB at a time and has a lock around
      it, so we can safely access it as a regular data access instead of with
      atomics.
    */
    if (isWriter) {
      // writers add the max value to the semaphore to allow the readers to
      // start accessing the critical section.
      currCount[0] += maxSemCount;
    } else {
      // readers add 1 to the semaphore
      ++currCount[0];
    }
    // now that we've updated the semaphore count can release the lock
    atomicExch(lock, 0);
  }
  __syncthreads();
}
#endif // #ifndef __CUDALOCKSSEMAPHORESPIN_CU__
|
931670007156685d408225381fd92ca2f3267d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; threads past the end of the data do nothing.
__global__ void add(int *a, int *b, int *c, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;  // guard the grid tail
    c[idx] = a[idx] + b[idx];
}
#define N (2048 * 2048)
#define M 512
// Host driver: adds two N-element random vectors on the GPU.
int main(void) {
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);
    // Allocate space for device copies of a, b, c.
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    // BUG FIX: was hipMalloc((void **)*d_c, size) — dereferencing the
    // uninitialized pointer instead of taking its address.
    hipMalloc((void **)&d_c, size);
    // Allocate host buffers.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    // Fill the inputs (random_ints is presumably defined elsewhere —
    // it is not declared in this translation unit).
    random_ints(a, N);
    random_ints(b, N);
    // Copy inputs to device.
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // Launch add() with one thread per element, M threads per block.
    // BUG FIX: the kernel takes 4 arguments; the original launch omitted
    // the element count N.
    hipLaunchKernelGGL(( add), dim3((N+M-1)/M), dim3(M), 0, 0, d_a, d_b, d_c, N);
    // Copy result back to host (blocking, so no explicit sync needed).
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    // Cleanup.
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
} | 931670007156685d408225381fd92ca2f3267d2c.cu | #include <cuda_runtime.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; threads past the end of the data do nothing.
__global__ void add(int *a, int *b, int *c, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;  // guard the grid tail
    c[idx] = a[idx] + b[idx];
}
#define N (2048 * 2048)
#define M 512
// Host driver: adds two N-element random vectors on the GPU.
int main(void) {
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);
    // Allocate space for device copies of a, b, c.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    // BUG FIX: was cudaMalloc((void **)*d_c, size) — dereferencing the
    // uninitialized pointer instead of taking its address.
    cudaMalloc((void **)&d_c, size);
    // Allocate host buffers.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    // Fill the inputs (random_ints is presumably defined elsewhere —
    // it is not declared in this translation unit).
    random_ints(a, N);
    random_ints(b, N);
    // Copy inputs to device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() with one thread per element, M threads per block.
    // BUG FIX: the kernel takes 4 arguments; the original launch omitted
    // the element count N.
    add<<<(N+M-1)/M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocking, so no explicit sync needed).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Cleanup.
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
} |
86fb7a18087ee33869ff4fe0b072ead9114c9a86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Standard Caffe softmax-loss forward kernel: per (sample, spatial position),
// loss = -log(p[true label]); writes 1/0 into counts for normalization.
// NOTE(review): not referenced by the Forward/Backward code visible in this
// file — likely retained from the layer this implementation was derived from.
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;   // sample index
    const int s = index % spatial_dim;   // spatial position within the sample
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      // Ignored positions contribute neither loss nor count.
      loss[index] = 0;
      counts[index] = 0;
    } else {
      // Clamp to FLT_MIN so log() never sees zero.
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}
// Writes y[i] = log(max(a[i], FLT_MIN)) element-wise; when both label arrays
// are supplied, the log is kept only for samples whose ground-truth label
// equals the predicted label (y = 0 for the rest).
template <typename Dtype>
__global__ void log_entropy_kernel(const int n, const Dtype* a, Dtype* y, const int channels, const Dtype* label = NULL, const Dtype* pred_label = NULL) {
  CUDA_KERNEL_LOOP(index, n) {
    const bool masked = (label != NULL && pred_label != NULL);
    if (!masked) {
      y[index] = log(max(a[index], Dtype(FLT_MIN)));
    } else {
      const int sample = index / channels;  // which sample this element is in
      y[index] = (label[sample] == pred_label[sample])
          ? log(max(a[index], Dtype(FLT_MIN)))
          : Dtype(0);
    }
  }
}
// Forward pass: runs the internal softmax, then computes an entropy-style
// loss from the probabilities p. Each term is p_i * log(p_i) (<= 0);
// caffe_gpu_asum sums absolute values, yielding a positive entropy total,
// which is divided by valid_count. With >= 3 bottoms, log terms are masked
// to samples where label (bottom[1]) == predicted label (bottom[2]) and
// valid_count is the number of such samples; otherwise it is
// outer_num_ * inner_num_.
template <typename Dtype>
void EntropyWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  // prob_'s diff buffer is reused as scratch for log(p); Backward_gpu
  // reads it back as log_prob_data.
  Dtype* log_prob_data = prob_.mutable_gpu_diff();
  bool has_label = bottom.size() >= 3;
  const Dtype* label = has_label ? bottom[1]->gpu_data() : NULL;
  const Dtype* pred_label = has_label ? bottom[2]->gpu_data() : NULL;
  const int dim = prob_.count() / outer_num_;  // NOTE(review): unused here
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  log_entropy_kernel << <CAFFE_GET_BLOCKS(prob_.count()), CAFFE_CUDA_NUM_THREADS >> >(prob_.count(),
    prob_data,
    log_prob_data,
    prob_.channels(),
    label,
    pred_label);
  // loss_data = p .* log(p), element-wise.
  caffe_gpu_mul(prob_.count(), prob_data, log_prob_data, loss_data);
  Dtype loss;
  caffe_gpu_asum(bottom[0]->count(), loss_data, &loss);
  Dtype valid_count = outer_num_*inner_num_;
  int num_count = 0;
  Dtype norm_weight = nthreads;  // NOTE(review): computed but never used here
  if (has_label)
  {
    // Count matching (label == predicted) samples on the host; these CPU
    // pointers deliberately shadow the GPU pointers declared above.
    const Dtype* label = has_label ? bottom[1]->cpu_data() : NULL;
    const Dtype* pred_label = has_label ? bottom[2]->cpu_data() : NULL;
    for (int i = 0; i < bottom[1]->count(); i++)
    {
      if (label[i] == pred_label[i])
        num_count += 1;
    }
    valid_count = num_count+1e-10;  // epsilon guards division by zero
  }
  top[0]->mutable_cpu_data()[0] = loss / valid_count;
  if (top.size() == 2) {
    // Optionally expose the softmax probabilities as a second top blob.
    top[1]->ShareData(prob_);
  }
}
// Standard Caffe softmax-loss backward kernel: subtracts 1 from the
// probability of the true label (gradient of -log p), zeroing ignored
// positions; counts records which positions contributed.
// NOTE(review): not referenced by the Forward/Backward code visible in this
// file — likely retained from the layer this implementation was derived from.
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;   // sample index
    const int s = index % spatial_dim;   // spatial position within the sample
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      // Ignored positions get zero gradient across all channels.
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      counts[index] = 1;
    }
  }
}
// For each (sample, spatial position) column of `a`, replaces every channel
// entry with the sum over all channels at that position (in-place broadcast
// of the per-column sum). n is the number of (sample, position) columns.
template <typename Dtype>
__global__ void channel_sum_kernel(const int n, const int spat_dim, const int channels, Dtype* a) {
  CUDA_KERNEL_LOOP(index, n) {
    const int sample = index / spat_dim;
    const int pos = index % spat_dim;
    // Offset of channel 0 for this column; channels are spat_dim apart.
    const int base = sample * channels * spat_dim + pos;
    Dtype total = 0;
    for (int ch = 0; ch < channels; ++ch)
    {
      total += a[base + ch * spat_dim];
    }
    for (int ch = 0; ch < channels; ++ch)
    {
      a[base + ch * spat_dim] = total;
    }
  }
}
// Backward pass: writes the entropy-loss gradient w.r.t. the softmax input
// into bottom[0]'s diff. On entry, bottom[0]'s diff still holds p .* log(p)
// from Forward_gpu and prob_'s diff holds log(p). The gradient is formed as
// p .* (sum_over_channels(p .* log p) - log p), then scaled by
// top_diff / norm_weight (and by 1/temperature_ when use_T_).
// NOTE(review): sign convention relative to the forward asum — confirm.
template <typename Dtype>
void EntropyWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // bottom_diff currently contains p .* log(p) computed by Forward_gpu.
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();  // NOTE(review): unused
    //caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* log_prob_data = prob_.gpu_diff();  // log(p) from forward
    bool has_label = bottom.size() >= 3;
    const Dtype* label = has_label ? bottom[1]->cpu_data() : NULL;
    const Dtype* pred_label = has_label ? bottom[2]->cpu_data() : NULL;
    const int outer_dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    int num_count = 0;
    // Normalizer: matching-sample count when labels are given, else all.
    Dtype norm_weight = nthreads;
    if (has_label)
    {
      for (int i = 0; i < bottom[1]->count(); i++)
      {
        if (label[i] == pred_label[i])
          num_count += 1;
      }
      norm_weight = num_count + 1e-10;  // epsilon guards division by zero
    }
    // The commented-out lines below are an older single-kernel version of
    // the channel-sum + subtract + multiply sequence implemented with gemms.
    //channel_sum_kernel << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(
    //	nthreads, inner_num_, bottom[0]->channels(), bottom_diff);
    //caffe_gpu_sub(bottom[0]->count(), bottom_diff, log_prob_data, bottom_diff);
    //caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
    // channel_mul_ is presumably an all-ones vector (initialized elsewhere):
    // the first gemm sums bottom_diff over channels per spatial position into
    // cache_'s data; the second broadcasts that sum back across channels into
    // cache_'s diff. TODO confirm channel_mul_ contents.
    if (inner_num_ > 1)
    {
      for (int n = 0; n < outer_num_; ++n)
      {
        caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1, inner_num_, bottom[0]->channels(),
          (Dtype)1.0, channel_mul_.gpu_data(), bottom_diff + n*outer_dim, (Dtype)0,
          cache_.mutable_gpu_data()+n*inner_num_);
      }
      for (int n = 0; n < outer_num_; ++n)
      {
        caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, bottom[0]->channels(), inner_num_, 1,
          (Dtype)1.0, channel_mul_.gpu_data(), cache_.gpu_data() + n*inner_num_,
          (Dtype)0, cache_.mutable_gpu_diff() + n*outer_dim);
      }
    }
    else
    {
      caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, 1, bottom[0]->channels(),
        (Dtype)1.0, bottom_diff, channel_mul_.gpu_data(), (Dtype)0, cache_.mutable_gpu_data());
      caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, bottom[0]->channels(), 1,
        (Dtype)1.0, cache_.gpu_data(), channel_mul_.gpu_data(), (Dtype)0, cache_.mutable_gpu_diff());
    }
    // bottom_diff = (channel-sum of p.*log p  -  log p) .* p
    caffe_gpu_sub(bottom[0]->count(), cache_.gpu_diff(), log_prob_data, bottom_diff);
    caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
    Dtype loss_weight = top[0]->cpu_diff()[0] / norm_weight; // / nthreads;
    if (use_T_)
    {
      // Temperature-scaled softmax: the chain rule adds a 1/T factor.
      loss_weight /= (Dtype)temperature_;
    }
    caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
    //LOG(INFO) << bottom[0]->asum_diff();
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyWithLossLayer);
} // namespace caffe
| 86fb7a18087ee33869ff4fe0b072ead9114c9a86.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Standard Caffe softmax-loss forward kernel: per (sample, spatial position),
// loss = -log(p[true label]); writes 1/0 into counts for normalization.
// NOTE(review): not referenced by the Forward/Backward code visible in this
// file — likely retained from the layer this implementation was derived from.
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;   // sample index
    const int s = index % spatial_dim;   // spatial position within the sample
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      // Ignored positions contribute neither loss nor count.
      loss[index] = 0;
      counts[index] = 0;
    } else {
      // Clamp to FLT_MIN so log() never sees zero.
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}
// Writes y[i] = log(max(a[i], FLT_MIN)) element-wise; when both label arrays
// are supplied, the log is kept only for samples whose ground-truth label
// equals the predicted label (y = 0 for the rest).
template <typename Dtype>
__global__ void log_entropy_kernel(const int n, const Dtype* a, Dtype* y, const int channels, const Dtype* label = NULL, const Dtype* pred_label = NULL) {
  CUDA_KERNEL_LOOP(index, n) {
    const bool masked = (label != NULL && pred_label != NULL);
    if (!masked) {
      y[index] = log(max(a[index], Dtype(FLT_MIN)));
    } else {
      const int sample = index / channels;  // which sample this element is in
      y[index] = (label[sample] == pred_label[sample])
          ? log(max(a[index], Dtype(FLT_MIN)))
          : Dtype(0);
    }
  }
}
// Forward pass: runs the internal softmax, then computes an entropy-style
// loss from the probabilities p. Each term is p_i * log(p_i) (<= 0);
// caffe_gpu_asum sums absolute values, yielding a positive entropy total,
// which is divided by valid_count. With >= 3 bottoms, log terms are masked
// to samples where label (bottom[1]) == predicted label (bottom[2]) and
// valid_count is the number of such samples; otherwise it is
// outer_num_ * inner_num_.
template <typename Dtype>
void EntropyWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  // prob_'s diff buffer is reused as scratch for log(p); Backward_gpu
  // reads it back as log_prob_data.
  Dtype* log_prob_data = prob_.mutable_gpu_diff();
  bool has_label = bottom.size() >= 3;
  const Dtype* label = has_label ? bottom[1]->gpu_data() : NULL;
  const Dtype* pred_label = has_label ? bottom[2]->gpu_data() : NULL;
  const int dim = prob_.count() / outer_num_;  // NOTE(review): unused here
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  log_entropy_kernel << <CAFFE_GET_BLOCKS(prob_.count()), CAFFE_CUDA_NUM_THREADS >> >(prob_.count(),
    prob_data,
    log_prob_data,
    prob_.channels(),
    label,
    pred_label);
  // loss_data = p .* log(p), element-wise.
  caffe_gpu_mul(prob_.count(), prob_data, log_prob_data, loss_data);
  Dtype loss;
  caffe_gpu_asum(bottom[0]->count(), loss_data, &loss);
  Dtype valid_count = outer_num_*inner_num_;
  int num_count = 0;
  Dtype norm_weight = nthreads;  // NOTE(review): computed but never used here
  if (has_label)
  {
    // Count matching (label == predicted) samples on the host; these CPU
    // pointers deliberately shadow the GPU pointers declared above.
    const Dtype* label = has_label ? bottom[1]->cpu_data() : NULL;
    const Dtype* pred_label = has_label ? bottom[2]->cpu_data() : NULL;
    for (int i = 0; i < bottom[1]->count(); i++)
    {
      if (label[i] == pred_label[i])
        num_count += 1;
    }
    valid_count = num_count+1e-10;  // epsilon guards division by zero
  }
  top[0]->mutable_cpu_data()[0] = loss / valid_count;
  if (top.size() == 2) {
    // Optionally expose the softmax probabilities as a second top blob.
    top[1]->ShareData(prob_);
  }
}
// Per-(sample, spatial) softmax cross-entropy backward: subtracts 1 from
// bottom_diff at the ground-truth channel (bottom_diff is assumed to be
// pre-filled with softmax probabilities). Ignored labels zero the whole
// channel column at that position and its count.
// NOTE(review): this kernel is never launched in this translation unit.
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
// In-place: for every (sample, spatial) position, replace each of the
// `channels` values at that position with their sum over channels.
// One work item per (sample, spatial) pair; n = num_samples * spat_dim.
// NOTE(review): only referenced from commented-out code in Backward_gpu.
template <typename Dtype>
__global__ void channel_sum_kernel(const int n, const int spat_dim, const int channels, Dtype* a) {
CUDA_KERNEL_LOOP(index, n) {
int n_idx = index / spat_dim;
int spat_idx = index % spat_dim;
Dtype sum = 0;
for (int ch = 0; ch < channels; ++ch)
{
int idx = (n_idx*channels + ch)*spat_dim + spat_idx;
sum += a[idx];
}
// Broadcast the sum back into every channel slot.
for (int ch = 0; ch < channels; ++ch)
{
int idx = (n_idx*channels + ch)*spat_dim + spat_idx;
a[idx] = sum;
}
}
}
// Backward pass: gradient of the normalized entropy w.r.t. the logits.
// On entry, bottom[0].diff still holds p*log(p) and prob_.diff holds
// log(p), both written as scratch by Forward_gpu. Using the identity
//   d(-sum_c p_c log p_c)/dz_c = p_c * (sum_k p_k log p_k - log p_c),
// the gemms broadcast the per-position channel sum of p*log(p) into
// cache_.diff (assumes channel_mul_ holds ones -- TODO confirm in layer
// setup), then bottom_diff = p * (channel_sum - log p) * loss_weight.
template <typename Dtype>
void EntropyWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();  // NOTE(review): unused
//caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* log_prob_data = prob_.gpu_diff();
bool has_label = bottom.size() >= 3;
const Dtype* label = has_label ? bottom[1]->cpu_data() : NULL;
const Dtype* pred_label = has_label ? bottom[2]->cpu_data() : NULL;
const int outer_dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
int num_count = 0;
Dtype norm_weight = nthreads;
if (has_label)
{
// Normalize by the number of samples whose prediction matches the
// label -- the same count Forward_gpu used for valid_count.
for (int i = 0; i < bottom[1]->count(); i++)
{
if (label[i] == pred_label[i])
num_count += 1;
}
norm_weight = num_count + 1e-10;
}
//channel_sum_kernel << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(
// nthreads, inner_num_, bottom[0]->channels(), bottom_diff);
//caffe_gpu_sub(bottom[0]->count(), bottom_diff, log_prob_data, bottom_diff);
//caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
if (inner_num_ > 1)
{
// Spatial case: per sample, reduce over channels into cache_.data
// (1 x inner_num_), then broadcast back over channels into cache_.diff.
for (int n = 0; n < outer_num_; ++n)
{
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1, inner_num_, bottom[0]->channels(),
(Dtype)1.0, channel_mul_.gpu_data(), bottom_diff + n*outer_dim, (Dtype)0,
cache_.mutable_gpu_data()+n*inner_num_);
}
for (int n = 0; n < outer_num_; ++n)
{
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, bottom[0]->channels(), inner_num_, 1,
(Dtype)1.0, channel_mul_.gpu_data(), cache_.gpu_data() + n*inner_num_,
(Dtype)0, cache_.mutable_gpu_diff() + n*outer_dim);
}
}
else
{
// 1x1 spatial case: both steps collapse into single batched gemms.
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, 1, bottom[0]->channels(),
(Dtype)1.0, bottom_diff, channel_mul_.gpu_data(), (Dtype)0, cache_.mutable_gpu_data());
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, bottom[0]->channels(), 1,
(Dtype)1.0, cache_.gpu_data(), channel_mul_.gpu_data(), (Dtype)0, cache_.mutable_gpu_diff());
}
caffe_gpu_sub(bottom[0]->count(), cache_.gpu_diff(), log_prob_data, bottom_diff);
caffe_gpu_mul(bottom[0]->count(), bottom_diff, prob_data, bottom_diff);
// Scale by the upstream gradient over the forward pass' normalizer.
Dtype loss_weight = top[0]->cpu_diff()[0] / norm_weight; // / nthreads;
if (use_T_)
{
loss_weight /= (Dtype)temperature_;  // temperature-scaled variant
}
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
//LOG(INFO) << bottom[0]->asum_diff();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyWithLossLayer);
} // namespace caffe
|
eb4d29b8ac9db290c07e1e9067f767536879c0b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <sentinel.h>
#include "fileutils.cuh"
#include "dcat.cuh"
#include "dchgrp.cuh"
#include "dchmod.cuh"
#include "dchown.cuh"
#include "dcmp_hip.cuh"
#include "dcp.cuh"
#include "dgrep.cuh"
#include "dls.cuh"
#include "dmkdir.cuh"
#include "dmore.cuh"
#include "dmv.cuh"
#include "drm.cuh"
#include "drmdir.cuh"
#include "dpwd_hip.cuh"
#include "dcd.cuh"
extern "C" bool sentinelFileUtilsExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*, char*, char*, intptr_t));
static sentinelExecutor _fileUtilsExecutor = { nullptr, "fileutils", sentinelFileUtilsExecutor, nullptr };
// Registers the statically-allocated file-utils executor with the sentinel
// runtime. NOTE(review): the semantics of the (true, false) flags are
// defined by sentinelRegisterExecutor elsewhere -- confirm before changing.
void sentinelRegisterFileUtils() {
sentinelRegisterExecutor(&_fileUtilsExecutor, true, false);
}
#include "fileutils.cuh"
#include "dcat.cuh"
#include "dchgrp.cuh"
#include "dchmod.cuh"
#include "dchown.cuh"
#include "dcmp.cuh"
#include "dcp.cuh"
#include "dgrep.cuh"
#include "dls.cuh"
#include "dmkdir.cuh"
#include "dmore.cuh"
#include "dmv.cuh"
#include "drm.cuh"
#include "drmdir.cuh"
#include "dpwd.cuh"
#include "dcd.cuh"
extern "C" bool sentinelFileUtilsExecutor(void *tag, sentinelMessage *data, int length, char *(**hostPrepare)(void*, char*, char*, intptr_t));
static sentinelExecutor _fileUtilsExecutor = { nullptr, "fileutils", sentinelFileUtilsExecutor, nullptr };
// Registers the statically-allocated file-utils executor with the sentinel
// runtime. NOTE(review): the semantics of the (true, false) flags are
// defined by sentinelRegisterExecutor elsewhere -- confirm before changing.
void sentinelRegisterFileUtils() {
sentinelRegisterExecutor(&_fileUtilsExecutor, true, false);
}
07674d3967a39e862532cc7fd9420adb8031ff60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_pool_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
// Ceil-divides N by the per-block thread count, capped at
// kNumMaxinumNumBlocks; the kernels walk any remainder with their
// grid-stride loops.
// NOTE(review): `::min` is a hipify artifact of `std::min`; it relies on a
// global min() overload being visible in host code -- confirm this builds
// on the target toolchain, otherwise restore std::min.
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
// ROI max-pooling forward (HIP build). One work item per output element
// (n, c, ph, pw), walked with a grid-stride loop. Each ROI
// (x1, y1, x2, y2, scaled by spatial_scale and rounded) is divided into a
// pooled_height x pooled_width grid; the max over each clipped sub-window
// goes to output_data, and its flat h*width+w index goes to argmax_data
// (-1 for empty windows) for use by the backward pass.
template <typename T>
__global__ void GPUROIPoolForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
int* roi_batch_id_data, T* output_data, int64_t* argmax_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// Decompose the flat index into (n, c, ph, pw).
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];  // image this ROI belongs to
int roi_start_w = round(offset_input_rois[0] * spatial_scale);
int roi_start_h = round(offset_input_rois[1] * spatial_scale);
int roi_end_w = round(offset_input_rois[2] * spatial_scale);
int roi_end_h = round(offset_input_rois[3] * spatial_scale);
// Degenerate ROIs are forced to at least 1x1.
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int hstart = static_cast<int>(floor(static_cast<double>(ph) *
static_cast<double>(roi_height) /
static_cast<double>(pooled_height)));
int wstart = static_cast<int>(floor(static_cast<double>(pw) *
static_cast<double>(roi_width) /
static_cast<double>(pooled_width)));
int hend = static_cast<int>(ceil(static_cast<double>(ph + 1) *
static_cast<double>(roi_height) /
static_cast<double>(pooled_height)));
int wend = static_cast<int>(ceil(static_cast<double>(pw + 1) *
static_cast<double>(roi_width) /
static_cast<double>(pooled_width)));
// Clip the sub-window to the feature-map bounds.
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
T maxval = is_empty ? 0 : -std::numeric_limits<T>::max();
int maxidx = -1;
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_data_index = h * width + w;
if (offset_input_data[input_data_index] > maxval) {
maxval = offset_input_data[input_data_index];
maxidx = input_data_index;
}
}
}
output_data[i] = maxval;
if (argmax_data) {
argmax_data[i] = maxidx;
}
}
}
// ROI max-pooling backward (HIP build): routes each output gradient to the
// single input element that won the forward max (argmax_data). atomicAdd
// handles overlapping ROIs that scatter into the same input cell; empty
// windows (argmax == -1) contribute nothing.
template <typename T>
__global__ void GPUROIPoolBackward(
const int nthreads, const T* input_rois, const T* output_grad,
const int64_t* argmax_data, const int num_rois, const float spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, int* roi_batch_id_data,
T* input_grad) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = roi_batch_id_data[n];
int input_offset = (roi_batch_ind * channels + c) * height * width;
int output_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_output_grad = output_grad + output_offset;
T* offset_input_grad = input_grad + input_offset;
const int64_t* offset_argmax_data = argmax_data + output_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
platform::CudaAtomicAdd(
offset_input_grad + argmax,
static_cast<T>(offset_output_grad[ph * pooled_width + pw]));
}
}
}
// Host-side ROI-pool forward (HIP build): builds the ROI -> batch-image
// table (from the RoisLod input tensor if present, otherwise from the ROIs
// blob's LoD), copies it to the device, and launches GPUROIPoolForward.
// Early-outs when there are no ROIs.
template <typename Place, typename T>
class GPUROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto* argmax = ctx.Output<Tensor>("Argmax");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
auto in_stride = framework::stride(in_dims);
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
// CPU-side scratch mapping each ROI index to its batch image.
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
if (ctx.HasInput("RoisLod")) {
// Offsets supplied as a device tensor: copy them to the host first.
auto* rois_lod = ctx.Input<Tensor>("RoisLod");
int rois_batch_size = rois_lod->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size - 1, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
std::vector<int64_t> rois_lod_(rois_batch_size);
memory::Copy(cplace, rois_lod_.data(), gplace, rois_lod->data<int64_t>(),
sizeof(int64_t) * rois_batch_size, 0);
for (int n = 0; n < rois_batch_size - 1; ++n) {
for (size_t i = rois_lod_[n]; i < rois_lod_[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
} else {
// Offsets come from the LoD attached to the ROIs blob.
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
// Stage the table on the device and launch.
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
hipLaunchKernelGGL(( GPUROIPoolForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()),
argmax->mutable_data<int64_t>(ctx.GetPlace()));
}
};
// Host-side ROI-pool backward (HIP build): rebuilds the ROI -> batch table,
// zero-fills x_grad, and launches GPUROIPoolBackward to scatter out_grad
// through the forward pass' argmax indices.
// NOTE(review): unlike the forward kernel, the RoisLod path here performs
// no batch-size consistency checks; the outer `rois_lod` below is shadowed
// by the re-declaration inside the if and otherwise unused.
template <typename Place, typename T>
class GPUROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* rois_lod = ctx.Input<Tensor>("RoisLod");
auto* argmax = ctx.Input<Tensor>("Argmax");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (x_grad) {
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
if (ctx.HasInput("RoisLod")) {
auto* rois_lod = ctx.Input<Tensor>("RoisLod");
int rois_batch_size = rois_lod->numel();
std::vector<int64_t> rois_lod_(rois_batch_size);
memory::Copy(cplace, rois_lod_.data(), gplace,
rois_lod->data<int64_t>(),
sizeof(int64_t) * rois_batch_size, 0);
for (int n = 0; n < rois_batch_size - 1; ++n) {
for (size_t i = rois_lod_[n]; i < rois_lod_[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
// Gradients accumulate via atomicAdd, so start from zero.
x_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, x_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUROIPoolBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size, rois->data<T>(), out_grad->data<T>(),
argmax->data<int64_t>(), rois_num, spatial_scale, channels, height,
width, pooled_height, pooled_width, roi_id_data,
x_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_pool,
ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_pool_grad,
ops::GPUROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
| 07674d3967a39e862532cc7fd9420adb8031ff60.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_pool_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
// Number of thread blocks needed to cover N work items at kNumCUDAThreads
// threads per block, clamped to kNumMaxinumNumBlocks; kernels handle any
// remainder with their grid-stride loops.
static inline int NumBlocks(const int N) {
  const int needed = (N + kNumCUDAThreads - 1) / kNumCUDAThreads;
  return needed < kNumMaxinumNumBlocks ? needed : kNumMaxinumNumBlocks;
}
// ROI max-pooling forward (CUDA build). One work item per output element
// (n, c, ph, pw), walked with a grid-stride loop. Each ROI
// (x1, y1, x2, y2, scaled by spatial_scale and rounded) is divided into a
// pooled_height x pooled_width grid; the max over each clipped sub-window
// goes to output_data, and its flat h*width+w index goes to argmax_data
// (-1 for empty windows) for use by the backward pass.
template <typename T>
__global__ void GPUROIPoolForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
int* roi_batch_id_data, T* output_data, int64_t* argmax_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// Decompose the flat index into (n, c, ph, pw).
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];  // image this ROI belongs to
int roi_start_w = round(offset_input_rois[0] * spatial_scale);
int roi_start_h = round(offset_input_rois[1] * spatial_scale);
int roi_end_w = round(offset_input_rois[2] * spatial_scale);
int roi_end_h = round(offset_input_rois[3] * spatial_scale);
// Degenerate ROIs are forced to at least 1x1.
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int hstart = static_cast<int>(floor(static_cast<double>(ph) *
static_cast<double>(roi_height) /
static_cast<double>(pooled_height)));
int wstart = static_cast<int>(floor(static_cast<double>(pw) *
static_cast<double>(roi_width) /
static_cast<double>(pooled_width)));
int hend = static_cast<int>(ceil(static_cast<double>(ph + 1) *
static_cast<double>(roi_height) /
static_cast<double>(pooled_height)));
int wend = static_cast<int>(ceil(static_cast<double>(pw + 1) *
static_cast<double>(roi_width) /
static_cast<double>(pooled_width)));
// Clip the sub-window to the feature-map bounds.
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
T maxval = is_empty ? 0 : -std::numeric_limits<T>::max();
int maxidx = -1;
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_data_index = h * width + w;
if (offset_input_data[input_data_index] > maxval) {
maxval = offset_input_data[input_data_index];
maxidx = input_data_index;
}
}
}
output_data[i] = maxval;
if (argmax_data) {
argmax_data[i] = maxidx;
}
}
}
// ROI max-pooling backward (CUDA build): routes each output gradient to
// the single input element that won the forward max (argmax_data).
// atomicAdd handles overlapping ROIs that scatter into the same input
// cell; empty windows (argmax == -1) contribute nothing.
template <typename T>
__global__ void GPUROIPoolBackward(
const int nthreads, const T* input_rois, const T* output_grad,
const int64_t* argmax_data, const int num_rois, const float spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, int* roi_batch_id_data,
T* input_grad) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = roi_batch_id_data[n];
int input_offset = (roi_batch_ind * channels + c) * height * width;
int output_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_output_grad = output_grad + output_offset;
T* offset_input_grad = input_grad + input_offset;
const int64_t* offset_argmax_data = argmax_data + output_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
platform::CudaAtomicAdd(
offset_input_grad + argmax,
static_cast<T>(offset_output_grad[ph * pooled_width + pw]));
}
}
}
// Host-side ROI-pool forward (CUDA build): builds the ROI -> batch-image
// table (from the RoisLod input tensor if present, otherwise from the ROIs
// blob's LoD), copies it to the device, and launches GPUROIPoolForward.
// Early-outs when there are no ROIs.
template <typename Place, typename T>
class GPUROIPoolOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto* argmax = ctx.Output<Tensor>("Argmax");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
auto in_stride = framework::stride(in_dims);
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
// CPU-side scratch mapping each ROI index to its batch image.
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
if (ctx.HasInput("RoisLod")) {
// Offsets supplied as a device tensor: copy them to the host first.
auto* rois_lod = ctx.Input<Tensor>("RoisLod");
int rois_batch_size = rois_lod->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size - 1, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
std::vector<int64_t> rois_lod_(rois_batch_size);
memory::Copy(cplace, rois_lod_.data(), gplace, rois_lod->data<int64_t>(),
sizeof(int64_t) * rois_batch_size, 0);
for (int n = 0; n < rois_batch_size - 1; ++n) {
for (size_t i = rois_lod_[n]; i < rois_lod_[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
} else {
// Offsets come from the LoD attached to the ROIs blob.
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
// Stage the table on the device and launch.
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
GPUROIPoolForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()),
argmax->mutable_data<int64_t>(ctx.GetPlace()));
}
};
// Host-side ROI-pool backward (CUDA build): rebuilds the ROI -> batch
// table, zero-fills x_grad, and launches GPUROIPoolBackward to scatter
// out_grad through the forward pass' argmax indices.
// NOTE(review): unlike the forward kernel, the RoisLod path here performs
// no batch-size consistency checks; the outer `rois_lod` below is shadowed
// by the re-declaration inside the if and otherwise unused.
template <typename Place, typename T>
class GPUROIPoolGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* rois_lod = ctx.Input<Tensor>("RoisLod");
auto* argmax = ctx.Input<Tensor>("Argmax");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (x_grad) {
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
if (ctx.HasInput("RoisLod")) {
auto* rois_lod = ctx.Input<Tensor>("RoisLod");
int rois_batch_size = rois_lod->numel();
std::vector<int64_t> rois_lod_(rois_batch_size);
memory::Copy(cplace, rois_lod_.data(), gplace,
rois_lod->data<int64_t>(),
sizeof(int64_t) * rois_batch_size, 0);
for (int n = 0; n < rois_batch_size - 1; ++n) {
for (size_t i = rois_lod_[n]; i < rois_lod_[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
// Gradients accumulate via atomicAdd, so start from zero.
x_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, x_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUROIPoolBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size, rois->data<T>(), out_grad->data<T>(),
argmax->data<int64_t>(), rois_num, spatial_scale, channels, height,
width, pooled_height, pooled_width, roi_id_data,
x_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_pool,
ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_pool_grad,
ops::GPUROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
c01ac470b64511e1233c2500064f60501e82cf68.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "cudnn_util.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
extern __shared__ float arr_sh[];
// Sparse (CSR) fully-connected forward. Grid mapping:
//   y -> output row (row_ptrs/column_indices are CSR over output rows),
//   x -> 32-thread warps, each handling up to input_feature_map_block_size
//        nonzeros of its row,
//   z -> groups of OUTPUT_ELEM_COUNT_BLOCK_SIZE entries (batch dimension).
// Each warp stages its nonzero column indices in dynamic shared memory
// (arr_sh), accumulates w*input partial sums over the per-column window,
// reduces across lanes with __shfl_xor, and lane 0 atomicAdds into the
// output -- so the output buffer must be zeroed beforehand (done by the
// caller via cuda_util::set_with_value).
// NOTE(review): uses mask-less __shfl_xor and relies on implicit warp
// synchrony between the shared-memory write and its reads (no __syncwarp)
// -- valid only with legacy (pre-Volta) warp semantics; confirm the target
// architectures before reusing.
__global__ void sparse_fully_connected_kernel(
float * __restrict output_neurons,
const float * __restrict input_neurons,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int input_feature_map_block_size,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
// CSR extent of this output row's nonzeros.
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * input_feature_map_block_size;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int it_count = min(input_feature_map_block_size, end_column_index - base_nnz_index);
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
// Stage this warp's column indices into dynamic shared memory.
volatile int * column_indices_sh = (int *)arr_sh;
if (lane_id < it_count)
column_indices_sh[thread_id] = column_indices[base_nnz_index + lane_id];
int window_it_count = (window_size + 31) >> 5;
// Out-of-range batch entries are clamped to the last entry but their
// results are discarded (valid[] gates the final atomicAdd).
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
{
valid[i] = (i < (entry_count - base_entry_id));
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
for(int i = 0; i < it_count; ++i)
{
int index = base_nnz_index + i;
int column_id = column_indices_sh[warp_id * 32 + i];
// Lanes cooperatively sweep the window in strides of 32.
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++k)
{
float inp = __load_nc(input_neurons + entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id);
sums[k] += w * inp;
}
}
local_weight_id += 32;
}
}
// Butterfly reduction across the warp's 32 lanes.
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
}
if (lane_id == 0)
{
// One atomic per warp per entry; warps covering different nnz blocks
// of the same row accumulate into the same output element.
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(output_neurons + (base_entry_id + i) * output_elem_count_per_entry + row_id, sums[i]);
}
}
const int sparse_fully_connected_layer_tester_cuda::max_input_feature_map_block_size = 32;
// Creates the cuDNN tensor descriptors used later to add the bias term
// (descriptors are initialized from zeroed handles, then allocated here).
sparse_fully_connected_layer_tester_cuda::sparse_fully_connected_layer_tester_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
// Releases the cuDNN tensor descriptors; return statuses are ignored here
// (no cudnn_safe_call) since throwing from a destructor is undesirable.
sparse_fully_connected_layer_tester_cuda::~sparse_fully_connected_layer_tester_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
// Forward pass for `entry_count` input entries:
//  1) zeroes the output buffer (the kernel accumulates with atomicAdd),
//  2) launches sparse_fully_connected_kernel,
//  3) optionally adds the per-feature-map bias through cuDNN.
// Buffer layout (from the launch below): input_buffers[0] = input neurons,
// data[0] = weights, data[1] = bias, data_custom[0] = column indices,
// data_custom[1] = CSR row pointers.
void sparse_fully_connected_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
// The kernel publishes partial sums with atomicAdd, so start from zero.
cuda_util::set_with_value(
*cuda_config,
*output_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> input_feature_map_block_size_and_count = get_input_feature_map_block_size_and_count();
// Thread layout: x = 32 lanes per input-feature-map block (one warp each),
// y = output rows, z = groups of OUTPUT_ELEM_COUNT_BLOCK_SIZE entries.
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * input_feature_map_block_size_and_count.second,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
// One 32-bit shared-memory slot per thread for staging column indices
// (sizeof(float) == sizeof(int32_t), see arr_sh cast in the kernel).
int smem_size = threadblock_size * sizeof(float);
hipLaunchKernelGGL(( sparse_fully_connected_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[0],
entry_count,
input_feature_map_block_size_and_count.first,
window_size);
// Add bias (broadcast over entries/spatial dims) with cuDNN, in-place on
// the output buffer; beta == 1 keeps the accumulated sums.
if (bias)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_buffer));
}
}
// Caches schema-derived parameters once the layer schema is attached:
// the feature-map connection count, the bias flag, the flattened window
// size, and the shape of the cuDNN bias descriptor.
void sparse_fully_connected_layer_tester_cuda::tester_configured()
{
std::shared_ptr<const sparse_convolution_layer> layer_derived = std::dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
bias = layer_derived->bias;
// Flatten the N-dimensional window into a single per-connection weight count.
window_size = 1;
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_size *= *it;
cudnn_util::set_tensor_bias_descriptor(
bias_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
}
// Scans the CSR row-pointer array (data_custom[1]) and caches the nonzero
// count of the widest row; get_input_feature_map_block_size_and_count()
// uses it to size the per-warp input-feature-map blocks.
void sparse_fully_connected_layer_tester_cuda::notify_data_custom(layer_data_custom::const_ptr host_data_custom)
{
	max_column_index_count_per_row = 0;
	const std::vector<int>& row_indices = host_data_custom->at(1);
	// Use signed arithmetic for the bound: the original `i < row_indices.size() - 1`
	// wrapped around to a huge unsigned value when row_indices was empty,
	// producing out-of-bounds reads.
	const int row_count = static_cast<int>(row_indices.size());
	for(int i = 0; i + 1 < row_count; ++i)
		max_column_index_count_per_row = ::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
// Splits the widest CSR row (max_column_index_count_per_row nonzeros) into
// (block_size, block_count) such that block_size never exceeds
// max_input_feature_map_block_size and block_size * block_count covers the row.
std::pair<int, int> sparse_fully_connected_layer_tester_cuda::get_input_feature_map_block_size_and_count() const
{
int candidate_block_size = max_column_index_count_per_row;
// The widest row already fits into a single block.
if (candidate_block_size <= max_input_feature_map_block_size)
return std::make_pair(candidate_block_size, 1);
// Otherwise use the fewest blocks of bounded size; both divisions round up.
int candidate_block_count2 = (candidate_block_size + max_input_feature_map_block_size - 1) / max_input_feature_map_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
| c01ac470b64511e1233c2500064f60501e82cf68.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "cudnn_util.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
extern __shared__ float arr_sh[];
// Sparse fully-connected forward pass: output += W_sparse * input.
// Sparsity is CSR-like: row_ptrs[row]..row_ptrs[row+1] index into
// column_indices; every nonzero "column" carries `window_size` weights.
// Thread layout (set up by enqueue_forward_propagation):
//   x: 32 lanes per input-feature-map block (one warp per block of nonzeros),
//   y: output row, z: group of OUTPUT_ELEM_COUNT_BLOCK_SIZE entries.
// Each warp reduces its partial sums and lane 0 accumulates them into the
// output with atomicAdd — the output buffer must be zeroed before launch.
__global__ void sparse_fully_connected_kernel(
float * __restrict output_neurons,
const float * __restrict input_neurons,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int input_feature_map_block_size,
int window_size)
{
// One output row per y-thread.
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
// CSR extent of this row's nonzeros.
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
// Each warp (thread_id_x >> 5) handles one block of up to
// input_feature_map_block_size consecutive nonzeros of the row.
int base_column_index_offset = (thread_id_x >> 5) * input_feature_map_block_size;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
// Each z-thread covers OUTPUT_ELEM_COUNT_BLOCK_SIZE consecutive entries.
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int it_count = min(input_feature_map_block_size, end_column_index - base_nnz_index);
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
// Stage this warp's column indices in shared memory, one index per lane.
// NOTE(review): written and later read by the same warp with no
// __syncwarp(); `volatile` only prevents register caching. This relies on
// pre-Volta implicit warp synchrony — confirm the target architecture.
volatile int * column_indices_sh = (int *)arr_sh;
if (lane_id < it_count)
column_indices_sh[thread_id] = column_indices[base_nnz_index + lane_id];
// Number of 32-lane passes needed to cover one weight window.
int window_it_count = (window_size + 31) >> 5;
// Clamp the entry tail: invalid slots read entry (entry_count - 1), which
// is a harmless in-bounds dummy, and are skipped on the write side.
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
{
valid[i] = (i < (entry_count - base_entry_id));
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
// Loop over the nonzeros of this warp's block; each lane covers weight
// positions lane_id, lane_id + 32, ... within the window.
for(int i = 0; i < it_count; ++i)
{
int index = base_nnz_index + i;
int column_id = column_indices_sh[warp_id * 32 + i];
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++k)
{
float inp = __load_nc(input_neurons + entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id);
sums[k] += w * inp;
}
}
local_weight_id += 32;
}
}
// Butterfly warp reduction of the per-lane partial sums.
// NOTE(review): mask-less __shfl_xor is the legacy intrinsic (removed for
// Volta+); __shfl_xor_sync with an explicit mask is required on SM70+.
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
}
// Lane 0 publishes the warp's sums; several warps can target the same
// output element (one per feature-map block), hence atomicAdd.
if (lane_id == 0)
{
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(output_neurons + (base_entry_id + i) * output_elem_count_per_entry + row_id, sums[i]);
}
}
const int sparse_fully_connected_layer_tester_cuda::max_input_feature_map_block_size = 32;
// Creates the two cuDNN tensor descriptors used by the bias-add step in
// enqueue_forward_propagation(); their shapes are filled in later
// (tester_configured / enqueue_forward_propagation) once the layer
// configuration is known.
sparse_fully_connected_layer_tester_cuda::sparse_fully_connected_layer_tester_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
// Releases the cuDNN descriptors created in the constructor. Unlike the
// constructor, the return codes are not routed through cudnn_safe_call,
// so destruction never throws.
sparse_fully_connected_layer_tester_cuda::~sparse_fully_connected_layer_tester_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
// Forward pass for `entry_count` input entries:
//  1) zeroes the output buffer (the kernel accumulates with atomicAdd),
//  2) launches sparse_fully_connected_kernel,
//  3) optionally adds the per-feature-map bias through cuDNN.
// Buffer layout (from the launch below): input_buffers[0] = input neurons,
// data[0] = weights, data[1] = bias, data_custom[0] = column indices,
// data_custom[1] = CSR row pointers.
void sparse_fully_connected_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
// The kernel publishes partial sums with atomicAdd, so start from zero.
cuda_util::set_with_value(
*cuda_config,
*output_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> input_feature_map_block_size_and_count = get_input_feature_map_block_size_and_count();
// Thread layout: x = 32 lanes per input-feature-map block (one warp each),
// y = output rows, z = groups of OUTPUT_ELEM_COUNT_BLOCK_SIZE entries.
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * input_feature_map_block_size_and_count.second,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
// One 32-bit shared-memory slot per thread for staging column indices
// (sizeof(float) == sizeof(int32_t), see arr_sh cast in the kernel).
int smem_size = threadblock_size * sizeof(float);
sparse_fully_connected_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*output_buffer,
*input_buffers[0],
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[0],
entry_count,
input_feature_map_block_size_and_count.first,
window_size);
// Add bias (broadcast over entries/spatial dims) with cuDNN, in-place on
// the output buffer; beta == 1 keeps the accumulated sums.
if (bias)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_buffer));
}
}
// Caches schema-derived parameters once the layer schema is attached:
// the feature-map connection count, the bias flag, the flattened window
// size, and the shape of the cuDNN bias descriptor.
void sparse_fully_connected_layer_tester_cuda::tester_configured()
{
	const std::shared_ptr<const sparse_convolution_layer> sparse_layer =
		std::dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
	feature_map_connection_count = sparse_layer->feature_map_connection_count;
	bias = sparse_layer->bias;
	// Flatten the N-dimensional window into a single per-connection weight count.
	window_size = 1;
	for(unsigned int dim_window_size : sparse_layer->window_sizes)
		window_size *= dim_window_size;
	cudnn_util::set_tensor_bias_descriptor(
		bias_desc,
		output_configuration_specific.feature_map_count,
		static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
}
// Scans the CSR row-pointer array (data_custom[1]) and caches the nonzero
// count of the widest row; get_input_feature_map_block_size_and_count()
// uses it to size the per-warp input-feature-map blocks.
void sparse_fully_connected_layer_tester_cuda::notify_data_custom(layer_data_custom::const_ptr host_data_custom)
{
	max_column_index_count_per_row = 0;
	const std::vector<int>& row_indices = host_data_custom->at(1);
	// Use signed arithmetic for the bound: the original `i < row_indices.size() - 1`
	// wrapped around to a huge unsigned value when row_indices was empty,
	// producing out-of-bounds reads.
	const int row_count = static_cast<int>(row_indices.size());
	for(int i = 0; i + 1 < row_count; ++i)
		max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
// Splits the widest CSR row (max_column_index_count_per_row nonzeros) into
// (block_size, block_count) such that block_size never exceeds
// max_input_feature_map_block_size and block_size * block_count covers the row.
std::pair<int, int> sparse_fully_connected_layer_tester_cuda::get_input_feature_map_block_size_and_count() const
{
	const int widest_row = max_column_index_count_per_row;
	// The widest row already fits into a single block.
	if (widest_row <= max_input_feature_map_block_size)
		return std::make_pair(widest_row, 1);
	// Otherwise use the fewest blocks of bounded size; both divisions round up.
	const int block_count = (widest_row + max_input_feature_map_block_size - 1) / max_input_feature_map_block_size;
	const int block_size = (widest_row + block_count - 1) / block_count;
	return std::make_pair(block_size, block_count);
}
}
}
|
0ccf7bfd60a9a886a2b3d32e4ce0c46a1852611b.hip | // !!! This is a file automatically generated by hipify!!!
#include "common/book.h"
int main( void ) {
hipDeviceProp_t prop;
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
HANDLE_ERROR( hipGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
} | 0ccf7bfd60a9a886a2b3d32e4ce0c46a1852611b.cu | #include "common/book.h"
int main( void ) {
cudaDeviceProp prop;
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) );
for (int i=0; i< count; i++) {
HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
} |
57bd4d39cc77985930c93ecf77f25f18fdbb0db0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "../common/common_win.h"
// Host<->device round-trip smoke test: allocates a 16M-float buffer, fills
// it on the host, copies it to the device and back, then tears everything
// down. CHECK() and init_float() come from common_win.h.
int main()
{
	// Select the first device and report its name and the buffer size.
	int device_id = 0;
	CHECK(hipSetDevice(device_id));
	unsigned int elem_count = 1 << 24;
	unsigned int byte_count = elem_count * sizeof(float);
	hipDeviceProp_t device_props;
	CHECK(hipGetDeviceProperties(&device_props, device_id));
	printf("Dev: %d, name %s, size %5.2fMB\n", device_id, device_props.name, byte_count / 1024.f / 1024.f);
	// Host-side buffer, filled with test data by the common helper.
	float *host_buf = (float *)malloc(byte_count);
	init_float(host_buf, elem_count);
	// Round-trip: host -> device -> host.
	float *dev_buf;
	CHECK(hipMalloc((void **)&dev_buf, byte_count));
	CHECK(hipMemcpy(dev_buf, host_buf, byte_count, hipMemcpyHostToDevice));
	CHECK(hipMemcpy(host_buf, dev_buf, byte_count, hipMemcpyDeviceToHost));
	// Release device and host resources before resetting the device.
	CHECK(hipFree(dev_buf));
	free(host_buf);
	CHECK(hipDeviceReset());
	return EXIT_SUCCESS;
}
| 57bd4d39cc77985930c93ecf77f25f18fdbb0db0.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "../common/common_win.h"
// Host<->device round-trip smoke test: allocates a 16M-float buffer, fills
// it on the host, copies it to the device and back, then tears everything
// down. CHECK() and init_float() come from common_win.h.
int main()
{
int dev = 0;
CHECK(cudaSetDevice(dev));
// 2^24 floats == 64 MB.
unsigned int size = 1 << 24;
unsigned int nbytes = size * sizeof(float);
cudaDeviceProp devp;
CHECK(cudaGetDeviceProperties(&devp, dev));
printf("Dev: %d, name %s, size %5.2fMB\n",dev, devp.name, nbytes/1024.f/1024.f);
// Host-side buffer, filled with test data by the common helper.
float *h = (float *)malloc(nbytes);
init_float(h, size);
// Round-trip: host -> device -> host.
float *d;
CHECK(cudaMalloc((void **)&d, nbytes));
CHECK(cudaMemcpy(d, h, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(h, d, nbytes, cudaMemcpyDeviceToHost));
CHECK(cudaFree(d));
free(h);
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
4a957f3c64789509a0c58f6e6299c0c162a613cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
/*
 * CPU reference check for the norm kernel: recomputes, for every
 * BLOCK_SIZE x BLOCK_SIZE tile, the weighted tile sum (weights in mul[])
 * and the parity-dependent normalized value, then compares element-wise
 * against the GPU output `out` with a 1e-6 absolute tolerance.
 * `ref` is scratch space of SIZE floats supplied by the caller.
 */
void checkresult(float *ref, float *in, float *out, float *mul, int width){
	for(int i = 0 ; i < GRID_SIZE; i++){
		for(int j = 0; j < GRID_SIZE; j++){
			// Weighted sum over one tile; mul[] is indexed by the column
			// offset within the tile.
			float sum = 0.0f;
			int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
			for(int ii = 0; ii < BLOCK_SIZE; ii++){
				for(int jj = 0; jj < BLOCK_SIZE; jj++){
					sum += in[start + ii * width + jj] * mul[jj];
				}
			}
			// Parity-dependent normalization (mirrors the GPU kernel).
			for(int ii = 0; ii < BLOCK_SIZE; ii++){
				for(int jj = 0; jj < BLOCK_SIZE; jj++){
					if(jj % 2 == 0 && ii % 2 == 0)
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
					else if(jj % 2 == 1 && ii % 2 == 0)
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
					else if(jj % 2 == 1 && ii % 2 == 1)
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
					else
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
				}
			}
		}
	}
	for(int i = 0; i < SIZE; i++){
		// Bug fix: the original used abs(), whose integer overload truncates
		// the float difference, so any mismatch smaller than 1.0 was reduced
		// to 0 and never reported. fabsf() keeps the fractional part.
		if(fabsf(ref[i]-out[i]) > 1.e-6){
			printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
			return;
		}
	}
	printf("results checking passed!\n");
}
// Normalizes each element by its tile's weighted sum, with a sign/scale
// chosen by the parity of the global (tx, ty) coordinates. Every thread of
// a block redundantly recomputes the same BLOCK_SIZE x BLOCK_SIZE tile sum
// from global memory (no shared-memory reduction).
// Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE) — TODO confirm launch config.
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
// Linear offset of this block's tile origin.
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
// Weighted sum over the tile; mul[] is indexed by the in-tile column.
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
// Parity-dependent normalization (matches the CPU reference in checkresult).
if(tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty]/sum;
else
out[tx * width + ty] = 0.0f;
}
// Driver: fills a SIZE-element matrix and a BLOCK_SIZE weight vector with
// random data, runs the norm kernel once (timed with CLOCK_REALTIME), and
// validates the result against the CPU reference in checkresult().
// NOTE(review): HIP API return codes are unchecked and the host/device
// buffers are never freed — acceptable for a one-shot benchmark, but worth
// confirming.
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
// Fixed seed for reproducible inputs.
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
struct timespec start, end;
// One thread per matrix element, one block per BLOCK_SIZE x BLOCK_SIZE tile.
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
// Synchronize before/after the launch so the timer covers only the kernel.
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
hipLaunchKernelGGL(( norm), dim3(grid), dim3(block), 0, 0, dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
| 4a957f3c64789509a0c58f6e6299c0c162a613cf.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
/*
 * CPU reference check for the norm kernel: recomputes, for every
 * BLOCK_SIZE x BLOCK_SIZE tile, the weighted tile sum (weights in mul[])
 * and the parity-dependent normalized value, then compares element-wise
 * against the GPU output `out` with a 1e-6 absolute tolerance.
 * `ref` is scratch space of SIZE floats supplied by the caller.
 */
void checkresult(float *ref, float *in, float *out, float *mul, int width){
	for(int i = 0 ; i < GRID_SIZE; i++){
		for(int j = 0; j < GRID_SIZE; j++){
			// Weighted sum over one tile; mul[] is indexed by the column
			// offset within the tile.
			float sum = 0.0f;
			int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
			for(int ii = 0; ii < BLOCK_SIZE; ii++){
				for(int jj = 0; jj < BLOCK_SIZE; jj++){
					sum += in[start + ii * width + jj] * mul[jj];
				}
			}
			// Parity-dependent normalization (mirrors the GPU kernel).
			for(int ii = 0; ii < BLOCK_SIZE; ii++){
				for(int jj = 0; jj < BLOCK_SIZE; jj++){
					if(jj % 2 == 0 && ii % 2 == 0)
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
					else if(jj % 2 == 1 && ii % 2 == 0)
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
					else if(jj % 2 == 1 && ii % 2 == 1)
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
					else
						ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
				}
			}
		}
	}
	for(int i = 0; i < SIZE; i++){
		// Bug fix: the original used abs(), whose integer overload truncates
		// the float difference, so any mismatch smaller than 1.0 was reduced
		// to 0 and never reported. fabsf() keeps the fractional part.
		if(fabsf(ref[i]-out[i]) > 1.e-6){
			printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
			return;
		}
	}
	printf("results checking passed!\n");
}
// Normalizes each element by its tile's weighted sum, with a sign/scale
// chosen by the parity of the global (tx, ty) coordinates. Every thread of
// a block redundantly recomputes the same BLOCK_SIZE x BLOCK_SIZE tile sum
// from global memory (no shared-memory reduction).
// Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE) — TODO confirm launch config.
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
// Linear offset of this block's tile origin.
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
// Weighted sum over the tile; mul[] is indexed by the in-tile column.
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
// Parity-dependent normalization (matches the CPU reference in checkresult).
if(tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty]/sum;
else
out[tx * width + ty] = 0.0f;
}
// Driver: fills a SIZE-element matrix and a BLOCK_SIZE weight vector with
// random data, runs the norm kernel once (timed with CLOCK_REALTIME), and
// validates the result against the CPU reference in checkresult().
// NOTE(review): CUDA API return codes are unchecked and the host/device
// buffers are never freed — acceptable for a one-shot benchmark, but worth
// confirming.
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
// Fixed seed for reproducible inputs.
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
struct timespec start, end;
// One thread per matrix element, one block per BLOCK_SIZE x BLOCK_SIZE tile.
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
// Synchronize before/after the launch so the timer covers only the kernel.
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
norm<<<grid, block>>>(dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
|
fca02e45d2379a3478c0ac03dc0e986041e60444.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "xMaxDeltaIntegralKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *intData = NULL;
hipMalloc(&intData, XSIZE*YSIZE);
float *tmpArray = NULL;
hipMalloc(&tmpArray, XSIZE*YSIZE);
const int nWindows = 1;
const int h = 1;
const int w = 1;
const float *xMax = NULL;
hipMalloc(&xMax, XSIZE*YSIZE);
const float *yMin = NULL;
hipMalloc(&yMin, XSIZE*YSIZE);
const float *yMax = NULL;
hipMalloc(&yMax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
xMaxDeltaIntegralKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,tmpArray,nWindows,h,w,xMax,yMin,yMax);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
xMaxDeltaIntegralKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,tmpArray,nWindows,h,w,xMax,yMin,yMax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
xMaxDeltaIntegralKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,tmpArray,nWindows,h,w,xMax,yMin,yMax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fca02e45d2379a3478c0ac03dc0e986041e60444.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "xMaxDeltaIntegralKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *intData = NULL;
cudaMalloc(&intData, XSIZE*YSIZE);
float *tmpArray = NULL;
cudaMalloc(&tmpArray, XSIZE*YSIZE);
const int nWindows = 1;
const int h = 1;
const int w = 1;
const float *xMax = NULL;
cudaMalloc(&xMax, XSIZE*YSIZE);
const float *yMin = NULL;
cudaMalloc(&yMin, XSIZE*YSIZE);
const float *yMax = NULL;
cudaMalloc(&yMax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
xMaxDeltaIntegralKernel<<<gridBlock,threadBlock>>>(intData,tmpArray,nWindows,h,w,xMax,yMin,yMax);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
xMaxDeltaIntegralKernel<<<gridBlock,threadBlock>>>(intData,tmpArray,nWindows,h,w,xMax,yMin,yMax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
xMaxDeltaIntegralKernel<<<gridBlock,threadBlock>>>(intData,tmpArray,nWindows,h,w,xMax,yMin,yMax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c3faf32d9474ffdf3ac9ed4ee14396ca1349a367.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file pctdemo_life_mex_shmem.cu
* @brief Example of implementing a stencil operation on the GPU using shared memory.
*
* Copyright 2013 The MathWorks, Inc.
*/
#include <stdint.h>
#include <algorithm>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include "get_lookup_mex.hpp"
#include "mex.h"
/**
* Host function called by MEX gateway. Sets up and calls the device function
* for each generation.
*/
#define CUDART_PI_F 3.141592654f
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error helper behind the gpuErrchk macro: on failure, reports the HIP
// error string with file/line to both the MATLAB console (mexPrintf) and
// stderr, then terminates when `abort` is set.
// NOTE(review): exit() inside a MEX file takes down the whole MATLAB
// session — mexErrMsgIdAndTxt would be the safer failure path; confirm
// this is intentional.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
mexPrintf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Complex multiplication: (*c_1 + i * *c_2) = (a_1 + i*a_2) * (b_1 + i*b_2).
// NOTE(review): *c_1 is written before a_1/a_2 are read again for *c_2, so
// the result is wrong if c_1 aliases one of the inputs — confirm callers
// never pass overlapping pointers (i.e. no in-place use).
__device__
void complex_mult(const float & a_1, const float & a_2, const float & b_1, const float & b_2, float* c_1, float* c_2) {
*c_1 = a_1 * b_1 - a_2 * b_2;
*c_2 = a_1 * b_2 + a_2 * b_1;
}
// Mathematical modulo: unlike fmodf (which keeps the sign of a), this
// returns a value in [0, b) for positive b, e.g. modulo_float(-1, 3) == 2.
__device__
float modulo_float(const float & a, const float & b) {
return fmodf(fmodf(a,b) + b , b);
}
// Empty placeholder: the parameter list mirrors a dipole-removal kernel but
// the body is blank. Presumably kept as a stub for testing/compilation —
// TODO confirm it is still needed before removing.
__global__
void dipole_compute_kernel_test(float * const pResidue, int32_t* const pPositive, float * const pOutArray, float * const pOut2Array,
int const dim_out_1, int const dim_out_2, int const dim_out_3, int const dims_Positive, int const direction)
{
}
// Fills the first size_lookup_table entries of pLookupTable with the -1
// sentinel ("no residue recorded"). One thread per element; the caller
// launches ceil(size_lookup_table / blockSize) blocks (see lookup_KERNEL).
__global__
void set_to_minus_1(int32_t * const pLookupTable, const int size_lookup_table)
{
	const int flat_idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard the grid tail: the launch rounds the thread count up.
	if (flat_idx >= size_lookup_table)
		return;
	pLookupTable[flat_idx] = -1;
}
// Builds a (z, case) -> first-residue-index lookup table. pResidue stores 6
// int32 fields per residue (field 2 = case id, field 5 = z slice);
// plookup_z_start[z] gives the base offset of slice z in the lookup table.
// One thread per residue: a thread writes its own index only when its
// (case, z) pair differs from the previous residue's, i.e. it records the
// first element of each run. This presumes residues sharing (case, z) are
// stored contiguously — TODO confirm the input is sorted that way.
__global__
void lookup_compute_kernel(int32_t * const pLookupTable, int32_t const * const plookup_z_start, int32_t const * const pResidue,const int dim_1_residue)
{
int id_1D = threadIdx.x + blockIdx.x * blockDim.x;
if (id_1D < dim_1_residue) { // because can be change to negative if all use when removing dipole along another direction
// Field offsets within one 6-field residue record.
const int case_ID_colomn = 2;
const int z_ID_colomn = 5;
const int row_num = 6;
//current residue data
int case_current_residue = pResidue[case_ID_colomn + row_num*id_1D];
int z_current_residue = pResidue[z_ID_colomn + row_num*id_1D];
//last residue data (sentinel -1 for the very first residue)
int case_last_residue = -1;
int z_last_residue = -1;
if (id_1D > 0) {
case_last_residue = pResidue[case_ID_colomn + row_num*(id_1D - 1)];
z_last_residue = pResidue[z_ID_colomn + row_num*(id_1D - 1)];
}
//check if need to update the lookup table: only the first residue of a
//(case, z) run records its index, so no two threads write the same slot
if (case_current_residue != case_last_residue || z_current_residue != z_last_residue) {
int lookup_ID = plookup_z_start[z_current_residue] + case_current_residue;//the id in the lookup table to update
pLookupTable[lookup_ID] = id_1D;// update it with the 1D index to the given element
}
}
}
// Host wrapper: (1) resets the whole lookup table to the -1 sentinel, then
// (2) records, for each (z, case) pair, the index of its first residue.
// Launch configurations come from the occupancy API with ceil-div grids.
// NOTE(review): the error checks after the launches are commented out, so
// kernel failures are silent here and the function always returns 1 —
// confirm that is acceptable for callers.
int lookup_KERNEL(int32_t * const pLookupTable, int32_t const * const plookup_z_start, int32_t const * const pResidue, int dim_1_residue, int size_lookup_table)
{
int blockSize;
int minGridSize;
int gridSize;
// Pass 1: one thread per lookup-table entry.
int arrayCount = size_lookup_table; // because itterates for each positive residue
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)set_to_minus_1, 0, arrayCount);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
set_to_minus_1 << <gridSize, blockSize >> > (pLookupTable, size_lookup_table);
//put the dimension in simpler variables
//int const size_3D_1 = size_residue_3D[0];
//int const size_3D_2 = size_residue_3D[1];
//int const size_3D_3 = size_residue_3D[2];
//variables for launch configuration
//int blockSize;
//int minGridSize;
//int gridSize;
// Pass 2: one thread per residue, writing first-occurrence indices.
arrayCount = dim_1_residue; // because itterates for each positive residue
hipOccupancyMaxPotentialBlockSize(&minGridSize,&blockSize,(void*)lookup_compute_kernel,0,arrayCount);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
lookup_compute_kernel << <gridSize, blockSize >> > ( pLookupTable, plookup_z_start, pResidue, dim_1_residue);
//gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipDeviceSynchronize());
//hipDeviceSynchronize();
return 1;
}
| c3faf32d9474ffdf3ac9ed4ee14396ca1349a367.cu | /**
* @file pctdemo_life_mex_shmem.cu
* @brief Example of implementing a stencil operation on the GPU using shared memory.
*
* Copyright 2013 The MathWorks, Inc.
*/
#include <stdint.h>
#include <algorithm>
#include <math.h>
#include <cuda_runtime_api.h>
#include "get_lookup_mex.hpp"
#include "mex.h"
/**
* Host function called by MEX gateway. Sets up and calls the device function
* for each generation.
*/
#define CUDART_PI_F 3.141592654f
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error to both the MATLAB console and stderr; optionally
// terminates the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
	if (code == cudaSuccess) {
		return; // nothing to report
	}
	mexPrintf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
	fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
	if (abort) exit(code);
}
// Complex multiply: (*c_1 + i * *c_2) = (a_1 + i*a_2) * (b_1 + i*b_2).
// Compute into temporaries before storing so the result is still correct when
// an output pointer aliases one of the (const reference) inputs; the original
// wrote *c_1 first and then re-read a_1/a_2/b_1/b_2, which is wrong for
// in-place use (e.g. complex_mult(x, y, u, v, &x, &y)).
__device__
void complex_mult(const float & a_1, const float & a_2, const float & b_1, const float & b_2, float* c_1, float* c_2) {
	const float re = a_1 * b_1 - a_2 * b_2;
	const float im = a_1 * b_2 + a_2 * b_1;
	*c_1 = re;
	*c_2 = im;
}
// Mathematical (always non-negative) float modulo: result lies in [0, b)
// for b > 0. fmodf alone keeps the sign of a, so shift by b and reduce again.
__device__
float modulo_float(const float & a, const float & b) {
	const float rem = fmodf(a, b);
	return fmodf(rem + b, b);
}
__global__
// Intentionally empty stub kernel -- presumably kept for launch/ABI
// experiments or as a placeholder for a future implementation. TODO confirm
// it is still needed; it is dead weight otherwise.
void dipole_compute_kernel_test(float * const pResidue, int32_t* const pPositive, float * const pOutArray, float * const pOut2Array,
	int const dim_out_1, int const dim_out_2, int const dim_out_3, int const dims_Positive, int const direction)
{
}
__global__
// Writes the sentinel -1 into every slot of the lookup table, one thread per
// slot; -1 means "no residue recorded for this (z, case) pair".
void set_to_minus_1(int32_t * const pLookupTable, const int size_lookup_table)
{
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= size_lookup_table) {
		return; // grid tail past the end of the table
	}
	pLookupTable[idx] = -1;
}
__global__
// One thread per residue row. Rows are stored with 6 entries each; entry 2
// holds the case id and entry 5 the z slice. Whenever a row's (case, z) pair
// differs from the previous row's, this row starts a new run, and its 1D
// index is recorded in the lookup table at plookup_z_start[z] + case.
void lookup_compute_kernel(int32_t * const pLookupTable, int32_t const * const plookup_z_start, int32_t const * const pResidue,const int dim_1_residue)
{
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= dim_1_residue) {
		return;
	}
	const int caseCol = 2; // column holding the case id
	const int zCol = 5;    // column holding the z slice
	const int rowLen = 6;  // entries per residue row
	// This row's key.
	const int curCase = pResidue[caseCol + rowLen * idx];
	const int curZ = pResidue[zCol + rowLen * idx];
	// Previous row's key (sentinel -1 for the first row).
	int prevCase = -1;
	int prevZ = -1;
	if (idx > 0) {
		prevCase = pResidue[caseCol + rowLen * (idx - 1)];
		prevZ = pResidue[zCol + rowLen * (idx - 1)];
	}
	// Record the first row of each (case, z) run.
	if (curCase != prevCase || curZ != prevZ) {
		pLookupTable[plookup_z_start[curZ] + curCase] = idx;
	}
}
int lookup_KERNEL(int32_t * const pLookupTable, int32_t const * const plookup_z_start, int32_t const * const pResidue, int dim_1_residue, int size_lookup_table)
{
	// Launch configuration is chosen per-kernel by the occupancy API.
	int minGridSize = 0;
	int blockSize = 0;
	int gridSize = 0;

	// Pass 1: fill every lookup-table slot with the sentinel value -1.
	cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)set_to_minus_1, 0, size_lookup_table);
	gridSize = (size_lookup_table + blockSize - 1) / blockSize; // ceil-div over table entries
	set_to_minus_1 << <gridSize, blockSize >> > (pLookupTable, size_lookup_table);

	// Pass 2: one thread per residue row records where each (z, case) run starts.
	cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void*)lookup_compute_kernel, 0, dim_1_residue);
	gridSize = (dim_1_residue + blockSize - 1) / blockSize; // ceil-div over residue rows
	lookup_compute_kernel << <gridSize, blockSize >> > (pLookupTable, plookup_z_start, pResidue, dim_1_residue);

	// NOTE(review): no error checking after the launches (the gpuErrchk calls
	// were commented out upstream); failures surface at the next sync point.
	return 1;
}
|
31f7702a9aac059cfa940543b32f4a221cba11ab.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include "EfficientStreamCompaction.h"
namespace StreamCompaction {
namespace Efficient {
// Zero-fills an int buffer, one thread per element.
__global__ void kernSetZero(int N, int* dev_data) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N) {
		dev_data[i] = 0;
	}
}
// Maps each path segment to a keep-flag: 1 if it still has bounces left,
// 0 if it is finished. One thread per segment.
__global__ void kernMapToBoolean(int N, int *bools, PathSegment *idata) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N) {
		bools[i] = (idata[i].remainingBounces != 0) ? 1 : 0;
	}
}
//__global__ void kernScatter(int n, PathSegment *odata,
// PathSegment *idata, const int *bools, const int *indices) {
// int index = threadIdx.x + (blockIdx.x * blockDim.x);
// if (index >= n) {
// return;
// }
// if (bools[index]) {
// odata[indices[index]] = idata[index];
// }
//}
// Single-thread kernel: derives the compacted element count from an
// exclusive scan -- the last prefix value, plus one if the last element is kept.
__global__ void kernSetCompactCount(int N, int* dev_count, int* bools, int* indices) {
	*dev_count = indices[N - 1] + (bools[N - 1] ? 1 : 0);
}
// Blelloch work-efficient exclusive scan of one n-element tile per block,
// entirely in dynamic shared memory.
// Preconditions (established by the callers in this file):
//  - n == 2 * blockDim.x (each thread owns two elements of the tile);
//  - dynamic shared memory of n * sizeof(int) bytes is passed at launch;
//  - g_idata / g_odata may alias (the callers scan in place).
// OriRoot[blockIdx.x] receives the tile total (the root value before it is
// zeroed), so a later pass can add per-block offsets.
__global__ void kernScanDynamicShared(int n, int *g_odata, int *g_idata, int *OriRoot) {
	extern __shared__ int temp[]; // n ints, supplied as the 3rd launch argument
	int thid = threadIdx.x;
	// assume it's always a 1D block
	int blockOffset = 2 * blockDim.x * blockIdx.x;
	int offset = 1;
	// Each thread loads its two elements of the tile.
	temp[2 * thid] = g_idata[blockOffset + 2 * thid];
	temp[2 * thid + 1] = g_idata[blockOffset + 2 * thid + 1];
	// Up-sweep (reduce): build partial sums up the implicit binary tree.
	for (int d = n >> 1; d > 0; d >>= 1) {
		__syncthreads();
		if (thid < d) {
			int ai = offset * (2 * thid + 1) - 1;
			int bi = offset * (2 * thid + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	// save origin root (the tile total) and set it to zero
	if (thid == 0) {
		OriRoot[blockIdx.x] = temp[n - 1];
		temp[n - 1] = 0;
	}
	// Down-sweep: walk back down the tree turning partial sums into
	// exclusive prefix sums.
	for (int d = 1; d < n; d *= 2) {
		offset >>= 1;
		__syncthreads();
		if (thid < d) {
			int ai = offset * (2 * thid + 1) - 1;
			int bi = offset * (2 * thid + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	// Write both scanned elements back to global memory.
	g_odata[blockOffset + 2 * thid] = temp[2 * thid];
	g_odata[blockOffset + 2 * thid + 1] = temp[2 * thid + 1];
}
// Adds each block-group's scanned total back onto its elements.
// LeftMoveBits encodes the group size as a power of two, so the owning
// OriRoot entry for this block is blockIdx.x >> LeftMoveBits.
__global__ void kernAddOriRoot(int N, int LeftMoveBits, int* OriRoot, int* dev_odata) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N) {
		dev_odata[i] += OriRoot[blockIdx.x >> LeftMoveBits];
	}
}
// Exclusive scan of dev_idata[0..n) into dev_odata, on the device.
// Works in rounds: a per-block shared-memory scan produces per-block totals
// (ori_root), those totals are themselves scanned, and the scanned totals are
// added back -- repeating with swapped buffers until one block remains.
// All sizes are padded up to multiples of blockSize and zero-filled so every
// tile is full. dev_odata/dev_idata are device pointers.
void scanDynamicShared(int n, int *dev_odata, const int *dev_idata) {
	int* dev_data;
	dim3 blockDim(blockSize);
	dim3 gridDim((n + blockSize - 1) / blockSize);
	int size = gridDim.x * blockSize; // n padded up to a whole number of tiles
	hipMalloc((void**)&dev_data, sizeof(int) * size);
	//checkCUDAError("hipMalloc dev_idata failed!");
	hipDeviceSynchronize();
	kernSetZero << < gridDim, blockDim >> > (size, dev_data);
	//checkCUDAError("kernSetZero failed!");
	int* ori_root; // per-block tile totals
	// ori_root_size has to be like that(first divide blockSize then multiply),
	// because also needs to meet efficient algorithm requirement
	// eg.
	// blockSize == 4,
	// indicies : 0 1 2 3 | 4 5 -> 0 1 2 3 | 4 5 0 0
	// elcusive_scan result : 0 0 1 3 | 0 4 9 9
	// ori_root : 6 9 (0 0)
	int ori_root_size = (gridDim.x + blockSize - 1) / blockSize;
	ori_root_size *= blockSize;
	hipMalloc((void**)&ori_root, sizeof(int) * ori_root_size);
	//checkCUDAError("hipMalloc ori_root failed!");
	hipDeviceSynchronize();
	kernSetZero << < dim3((ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_size, ori_root);
	//checkCUDAError("kernSetZero failed!");
	// Copy the real input into the zero-padded working buffer.
	hipMemcpy(dev_data, dev_idata, sizeof(int) * n, hipMemcpyDeviceToDevice);
	//checkCUDAError("hipMemcpy failed!");
	int sharedMemoryPerBlockInBytes = blockDim.x * sizeof(int);
	// Step 1 : do scan
	// Note: launched with blockDim.x / 2 threads because each thread of the
	// scan kernel owns two elements of a blockDim.x-wide tile.
	kernScanDynamicShared << <gridDim, dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, dev_data, dev_data, ori_root);
	// Step 2.5 : scans of scan
	// like ori_root_size,
	// ori_root_of_ori_root_size has to align with blockSize
	int *ori_root_of_ori_root;
	int ori_root_of_ori_root_size = ori_root_size / blockSize;
	ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
	ori_root_of_ori_root_size *= blockSize;
	hipMalloc((void**)&ori_root_of_ori_root, sizeof(int) * ori_root_of_ori_root_size);
	//checkCUDAError("hipMalloc ori_root_of_ori_root failed!");
	int stride = 1; // how many input blocks one ori_root entry covers this round
	do {
		// do scan of scan of scan here
		kernSetZero << < dim3((ori_root_of_ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_of_ori_root_size, ori_root_of_ori_root);
		//checkCUDAError("kernSetZero failed!");
		kernScanDynamicShared << < dim3(ori_root_size / blockSize), dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, ori_root, ori_root, ori_root_of_ori_root);
		//checkCUDAError("kernScanDynamicShared 2 failed!");
		// Add the scanned totals back; ilog2(stride) turns the block->group
		// division into a right shift inside the kernel.
		kernAddOriRoot << <gridDim, blockDim >> > (size, ilog2(stride), ori_root, dev_data);
		//checkCUDAError("kernAddOriRoot failed!");
		// exit here
		// we exit until there is only one block
		if (ori_root_size == blockSize) {
			break;
		}
		// reset ori_root and ori_root_of_ori_root infomation
		// (ping-pong the two buffers for the next, coarser round)
		ori_root_size = ori_root_of_ori_root_size;
		int *temp = ori_root_of_ori_root;
		ori_root_of_ori_root = ori_root;
		ori_root = temp;
		ori_root_of_ori_root_size = ori_root_size / blockSize;
		ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
		ori_root_of_ori_root_size *= blockSize;
		stride *= blockSize;
	} while (true);
	// Only the first n entries of the padded buffer are the real result.
	hipMemcpy(dev_odata, dev_data, sizeof(int) * n, hipMemcpyDeviceToDevice);
	//checkCUDAError("hipMemcpy failed!");
	hipFree(dev_data);
	hipFree(ori_root);
	hipFree(ori_root_of_ori_root);
}
// Partitions dev_data so that segments with remaining bounces come first,
// and returns how many such segments there are.
// Pipeline: flags (kernMapToBoolean) -> exclusive scan of the flags (same
// multi-round scheme as scanDynamicShared) -> count from last flag + last
// prefix -> stable-ish reorder. Instead of scattering (which would drop
// finished segments), it sorts by the keep-flag so finished segments are kept
// at the tail for later color accumulation.
int compactDynamicShared(int n, PathSegment *dev_data) {
	// compact Set-up
	int* bools;
	int* indices;
	int* dev_count;
	int count;
	dim3 blockDim(blockSize);
	dim3 gridDim((n + blockSize - 1) / blockSize);
	hipMalloc((void**)&bools, n * sizeof(int));
	//checkCUDAError("hipMalloc bools failed!");
	hipMalloc((void**)&dev_count, sizeof(int));
	//checkCUDAError("hipMalloc dev_count failed!");
	hipDeviceSynchronize();
	// Scan Set-up
	// gridDim.x : has to be 2 ^n, which is our efficient compaction algorithm requires
	// size : acutal size + filled 0s
	int size = gridDim.x * blockSize;
	int* ori_root; // per-block tile totals for the scan
	// ori_root_size has to be like that(first divide blockSize then multiply),
	// because also needs to meet efficient algorithm requirement
	// eg.
	// blockSize == 4,
	// indicies : 0 1 2 3 | 4 5 -> 0 1 2 3 | 4 5 0 0
	// elcusive_scan result : 0 0 1 3 | 0 4 9 9
	// ori_root : 6 9 (0 0)
	int ori_root_size = (gridDim.x + blockSize - 1) / blockSize;
	ori_root_size *= blockSize;
	hipMalloc((void**)&indices, size * sizeof(int));
	//checkCUDAError("hipMalloc indices failed!");
	hipMalloc((void**)&ori_root, sizeof(int) * ori_root_size);
	//checkCUDAError("hipMalloc ori_root failed!");
	hipDeviceSynchronize();
	kernSetZero << < gridDim, blockDim >> > (size, indices);
	//checkCUDAError("kernSetZero failed!");
	kernSetZero << < dim3((ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_size, ori_root);
	//checkCUDAError("kernSetZero failed!");
	int sharedMemoryPerBlockInBytes = blockDim.x * sizeof(int);
	// Step 1 : compute bools array (1 = segment still bouncing)
	kernMapToBoolean << <gridDim, blockDim >> > (n, bools, dev_data);
	//checkCUDAError("kernMapToBoolean failed!");
	// indeices# >= bools#
	hipMemcpy(indices, bools, sizeof(int) * n, hipMemcpyDeviceToDevice);
	//checkCUDAError("hipMemcpy failed!");
	// Step 2 : exclusive scan indices (in place; half as many threads as tile
	// elements -- each scan thread owns two elements)
	kernScanDynamicShared << <gridDim, dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, indices, indices, ori_root);
	//checkCUDAError("kernScanDynamicShared 1 failed!");
	// Step 2.5 : scans of scan
	// like ori_root_size,
	// ori_root_of_ori_root_size has to align with blockSize
	int *ori_root_of_ori_root;
	int ori_root_of_ori_root_size = ori_root_size / blockSize;
	ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
	ori_root_of_ori_root_size *= blockSize;
	hipMalloc((void**)&ori_root_of_ori_root, sizeof(int) * ori_root_of_ori_root_size);
	//checkCUDAError("hipMalloc ori_root_of_ori_root failed!");
	int stride = 1; // input blocks covered per ori_root entry this round
	do {
		// do scan of scan of scan here
		kernSetZero << < dim3((ori_root_of_ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_of_ori_root_size, ori_root_of_ori_root);
		//checkCUDAError("kernSetZero failed!");
		kernScanDynamicShared << < dim3(ori_root_size / blockSize), dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, ori_root, ori_root, ori_root_of_ori_root);
		//checkCUDAError("kernScanDynamicShared 2 failed!");
		kernAddOriRoot << <gridDim, blockDim >> > (size, ilog2(stride), ori_root, indices);
		//checkCUDAError("kernAddOriRoot failed!");
		// exit here
		// we exit until there is only one block
		if (ori_root_size == blockSize) {
			break;
		}
		// reset ori_root and ori_root_of_ori_root infomation
		// (ping-pong the two buffers for the next, coarser round)
		ori_root_size = ori_root_of_ori_root_size;
		int *temp = ori_root_of_ori_root;
		ori_root_of_ori_root = ori_root;
		ori_root = temp;
		ori_root_of_ori_root_size = ori_root_size / blockSize;
		ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
		ori_root_of_ori_root_size *= blockSize;
		stride *= blockSize;
	} while (true);
	// Step 3 : Sort (Scatter)
	kernSetCompactCount << <dim3(1), dim3(1) >> > (n, dev_count, bools, indices);
	//checkCUDAError("kernSetCompactCount failed!");
	// Since scatter just discard elements who doesn't meet our criterion(bools value = 0)
	// However, we don't want discard pathSegments whoes remaining bounce is 0, we still its color info
	// after this iteration ends.
	// So, instead of scattering, we just sort here.
	// bools value == 1 will put ahead, and 0 behind.
	/*kernScatter << <gridDim, blockDim >> > (n, dev_data, dev_data, bools, indices);
	checkCUDAError("kernScatter failed!");*/
	thrust::device_ptr<int> dev_thrust_keys(bools);
	thrust::device_ptr<PathSegment> dev_thrust_values(dev_data);
	thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + n, dev_thrust_values, thrust::greater<int>());
	hipMemcpy(&count, dev_count, sizeof(int), hipMemcpyDeviceToHost);
	//checkCUDAError("hipMemcpy failed!");
	hipFree(bools);
	hipFree(dev_count);
	hipFree(indices);
	hipFree(ori_root);
	hipFree(ori_root_of_ori_root);
	return count;
}
}
}
| 31f7702a9aac059cfa940543b32f4a221cba11ab.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include "EfficientStreamCompaction.h"
namespace StreamCompaction {
namespace Efficient {
// Zero-fills an int buffer, one thread per element.
__global__ void kernSetZero(int N, int* dev_data) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N) {
		dev_data[i] = 0;
	}
}
// Maps each path segment to a keep-flag: 1 if it still has bounces left,
// 0 if it is finished. One thread per segment.
__global__ void kernMapToBoolean(int N, int *bools, PathSegment *idata) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N) {
		bools[i] = (idata[i].remainingBounces != 0) ? 1 : 0;
	}
}
//__global__ void kernScatter(int n, PathSegment *odata,
// PathSegment *idata, const int *bools, const int *indices) {
// int index = threadIdx.x + (blockIdx.x * blockDim.x);
// if (index >= n) {
// return;
// }
// if (bools[index]) {
// odata[indices[index]] = idata[index];
// }
//}
// Single-thread kernel: derives the compacted element count from an
// exclusive scan -- the last prefix value, plus one if the last element is kept.
__global__ void kernSetCompactCount(int N, int* dev_count, int* bools, int* indices) {
	*dev_count = indices[N - 1] + (bools[N - 1] ? 1 : 0);
}
// Blelloch work-efficient exclusive scan of one n-element tile per block,
// entirely in dynamic shared memory.
// Preconditions (established by the callers in this file):
//  - n == 2 * blockDim.x (each thread owns two elements of the tile);
//  - dynamic shared memory of n * sizeof(int) bytes is passed at launch;
//  - g_idata / g_odata may alias (the callers scan in place).
// OriRoot[blockIdx.x] receives the tile total (the root value before it is
// zeroed), so a later pass can add per-block offsets.
__global__ void kernScanDynamicShared(int n, int *g_odata, int *g_idata, int *OriRoot) {
	extern __shared__ int temp[]; // n ints, supplied as the 3rd launch argument
	int thid = threadIdx.x;
	// assume it's always a 1D block
	int blockOffset = 2 * blockDim.x * blockIdx.x;
	int offset = 1;
	// Each thread loads its two elements of the tile.
	temp[2 * thid] = g_idata[blockOffset + 2 * thid];
	temp[2 * thid + 1] = g_idata[blockOffset + 2 * thid + 1];
	// Up-sweep (reduce): build partial sums up the implicit binary tree.
	for (int d = n >> 1; d > 0; d >>= 1) {
		__syncthreads();
		if (thid < d) {
			int ai = offset * (2 * thid + 1) - 1;
			int bi = offset * (2 * thid + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	// save origin root (the tile total) and set it to zero
	if (thid == 0) {
		OriRoot[blockIdx.x] = temp[n - 1];
		temp[n - 1] = 0;
	}
	// Down-sweep: walk back down the tree turning partial sums into
	// exclusive prefix sums.
	for (int d = 1; d < n; d *= 2) {
		offset >>= 1;
		__syncthreads();
		if (thid < d) {
			int ai = offset * (2 * thid + 1) - 1;
			int bi = offset * (2 * thid + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	// Write both scanned elements back to global memory.
	g_odata[blockOffset + 2 * thid] = temp[2 * thid];
	g_odata[blockOffset + 2 * thid + 1] = temp[2 * thid + 1];
}
// Adds each block-group's scanned total back onto its elements.
// LeftMoveBits encodes the group size as a power of two, so the owning
// OriRoot entry for this block is blockIdx.x >> LeftMoveBits.
__global__ void kernAddOriRoot(int N, int LeftMoveBits, int* OriRoot, int* dev_odata) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N) {
		dev_odata[i] += OriRoot[blockIdx.x >> LeftMoveBits];
	}
}
// Exclusive scan of dev_idata[0..n) into dev_odata, on the device.
// Works in rounds: a per-block shared-memory scan produces per-block totals
// (ori_root), those totals are themselves scanned, and the scanned totals are
// added back -- repeating with swapped buffers until one block remains.
// All sizes are padded up to multiples of blockSize and zero-filled so every
// tile is full. dev_odata/dev_idata are device pointers.
void scanDynamicShared(int n, int *dev_odata, const int *dev_idata) {
	int* dev_data;
	dim3 blockDim(blockSize);
	dim3 gridDim((n + blockSize - 1) / blockSize);
	int size = gridDim.x * blockSize; // n padded up to a whole number of tiles
	cudaMalloc((void**)&dev_data, sizeof(int) * size);
	//checkCUDAError("cudaMalloc dev_idata failed!");
	cudaDeviceSynchronize();
	kernSetZero << < gridDim, blockDim >> > (size, dev_data);
	//checkCUDAError("kernSetZero failed!");
	int* ori_root; // per-block tile totals
	// ori_root_size has to be like that(first divide blockSize then multiply),
	// because also needs to meet efficient algorithm requirement
	// eg.
	// blockSize == 4,
	// indicies : 0 1 2 3 | 4 5 -> 0 1 2 3 | 4 5 0 0
	// elcusive_scan result : 0 0 1 3 | 0 4 9 9
	// ori_root : 6 9 (0 0)
	int ori_root_size = (gridDim.x + blockSize - 1) / blockSize;
	ori_root_size *= blockSize;
	cudaMalloc((void**)&ori_root, sizeof(int) * ori_root_size);
	//checkCUDAError("cudaMalloc ori_root failed!");
	cudaDeviceSynchronize();
	kernSetZero << < dim3((ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_size, ori_root);
	//checkCUDAError("kernSetZero failed!");
	// Copy the real input into the zero-padded working buffer.
	cudaMemcpy(dev_data, dev_idata, sizeof(int) * n, cudaMemcpyDeviceToDevice);
	//checkCUDAError("cudaMemcpy failed!");
	int sharedMemoryPerBlockInBytes = blockDim.x * sizeof(int);
	// Step 1 : do scan
	// Note: launched with blockDim.x / 2 threads because each thread of the
	// scan kernel owns two elements of a blockDim.x-wide tile.
	kernScanDynamicShared << <gridDim, dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, dev_data, dev_data, ori_root);
	// Step 2.5 : scans of scan
	// like ori_root_size,
	// ori_root_of_ori_root_size has to align with blockSize
	int *ori_root_of_ori_root;
	int ori_root_of_ori_root_size = ori_root_size / blockSize;
	ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
	ori_root_of_ori_root_size *= blockSize;
	cudaMalloc((void**)&ori_root_of_ori_root, sizeof(int) * ori_root_of_ori_root_size);
	//checkCUDAError("cudaMalloc ori_root_of_ori_root failed!");
	int stride = 1; // how many input blocks one ori_root entry covers this round
	do {
		// do scan of scan of scan here
		kernSetZero << < dim3((ori_root_of_ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_of_ori_root_size, ori_root_of_ori_root);
		//checkCUDAError("kernSetZero failed!");
		kernScanDynamicShared << < dim3(ori_root_size / blockSize), dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, ori_root, ori_root, ori_root_of_ori_root);
		//checkCUDAError("kernScanDynamicShared 2 failed!");
		// Add the scanned totals back; ilog2(stride) turns the block->group
		// division into a right shift inside the kernel.
		kernAddOriRoot << <gridDim, blockDim >> > (size, ilog2(stride), ori_root, dev_data);
		//checkCUDAError("kernAddOriRoot failed!");
		// exit here
		// we exit until there is only one block
		if (ori_root_size == blockSize) {
			break;
		}
		// reset ori_root and ori_root_of_ori_root infomation
		// (ping-pong the two buffers for the next, coarser round)
		ori_root_size = ori_root_of_ori_root_size;
		int *temp = ori_root_of_ori_root;
		ori_root_of_ori_root = ori_root;
		ori_root = temp;
		ori_root_of_ori_root_size = ori_root_size / blockSize;
		ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
		ori_root_of_ori_root_size *= blockSize;
		stride *= blockSize;
	} while (true);
	// Only the first n entries of the padded buffer are the real result.
	cudaMemcpy(dev_odata, dev_data, sizeof(int) * n, cudaMemcpyDeviceToDevice);
	//checkCUDAError("cudaMemcpy failed!");
	cudaFree(dev_data);
	cudaFree(ori_root);
	cudaFree(ori_root_of_ori_root);
}
// Partitions dev_data so that segments with remaining bounces come first,
// and returns how many such segments there are.
// Pipeline: flags (kernMapToBoolean) -> exclusive scan of the flags (same
// multi-round scheme as scanDynamicShared) -> count from last flag + last
// prefix -> reorder. Instead of scattering (which would drop finished
// segments), it sorts by the keep-flag so finished segments are kept at the
// tail for later color accumulation.
int compactDynamicShared(int n, PathSegment *dev_data) {
	// compact Set-up
	int* bools;
	int* indices;
	int* dev_count;
	int count;
	dim3 blockDim(blockSize);
	dim3 gridDim((n + blockSize - 1) / blockSize);
	cudaMalloc((void**)&bools, n * sizeof(int));
	//checkCUDAError("cudaMalloc bools failed!");
	cudaMalloc((void**)&dev_count, sizeof(int));
	//checkCUDAError("cudaMalloc dev_count failed!");
	cudaDeviceSynchronize();
	// Scan Set-up
	// gridDim.x : has to be 2 ^n, which is our efficient compaction algorithm requires
	// size : acutal size + filled 0s
	int size = gridDim.x * blockSize;
	int* ori_root; // per-block tile totals for the scan
	// ori_root_size has to be like that(first divide blockSize then multiply),
	// because also needs to meet efficient algorithm requirement
	// eg.
	// blockSize == 4,
	// indicies : 0 1 2 3 | 4 5 -> 0 1 2 3 | 4 5 0 0
	// elcusive_scan result : 0 0 1 3 | 0 4 9 9
	// ori_root : 6 9 (0 0)
	int ori_root_size = (gridDim.x + blockSize - 1) / blockSize;
	ori_root_size *= blockSize;
	cudaMalloc((void**)&indices, size * sizeof(int));
	//checkCUDAError("cudaMalloc indices failed!");
	cudaMalloc((void**)&ori_root, sizeof(int) * ori_root_size);
	//checkCUDAError("cudaMalloc ori_root failed!");
	cudaDeviceSynchronize();
	kernSetZero << < gridDim, blockDim >> > (size, indices);
	//checkCUDAError("kernSetZero failed!");
	kernSetZero << < dim3((ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_size, ori_root);
	//checkCUDAError("kernSetZero failed!");
	int sharedMemoryPerBlockInBytes = blockDim.x * sizeof(int);
	// Step 1 : compute bools array (1 = segment still bouncing)
	kernMapToBoolean << <gridDim, blockDim >> > (n, bools, dev_data);
	//checkCUDAError("kernMapToBoolean failed!");
	// indeices# >= bools#
	cudaMemcpy(indices, bools, sizeof(int) * n, cudaMemcpyDeviceToDevice);
	//checkCUDAError("cudaMemcpy failed!");
	// Step 2 : exclusive scan indices (in place; half as many threads as tile
	// elements -- each scan thread owns two elements)
	kernScanDynamicShared << <gridDim, dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, indices, indices, ori_root);
	//checkCUDAError("kernScanDynamicShared 1 failed!");
	// Step 2.5 : scans of scan
	// like ori_root_size,
	// ori_root_of_ori_root_size has to align with blockSize
	int *ori_root_of_ori_root;
	int ori_root_of_ori_root_size = ori_root_size / blockSize;
	ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
	ori_root_of_ori_root_size *= blockSize;
	cudaMalloc((void**)&ori_root_of_ori_root, sizeof(int) * ori_root_of_ori_root_size);
	//checkCUDAError("cudaMalloc ori_root_of_ori_root failed!");
	int stride = 1; // input blocks covered per ori_root entry this round
	do {
		// do scan of scan of scan here
		kernSetZero << < dim3((ori_root_of_ori_root_size + blockDim.x - 1) / blockDim.x), blockDim >> > (ori_root_of_ori_root_size, ori_root_of_ori_root);
		//checkCUDAError("kernSetZero failed!");
		kernScanDynamicShared << < dim3(ori_root_size / blockSize), dim3(blockDim.x / 2), sharedMemoryPerBlockInBytes >> > (blockDim.x, ori_root, ori_root, ori_root_of_ori_root);
		//checkCUDAError("kernScanDynamicShared 2 failed!");
		kernAddOriRoot << <gridDim, blockDim >> > (size, ilog2(stride), ori_root, indices);
		//checkCUDAError("kernAddOriRoot failed!");
		// exit here
		// we exit until there is only one block
		if (ori_root_size == blockSize) {
			break;
		}
		// reset ori_root and ori_root_of_ori_root infomation
		// (ping-pong the two buffers for the next, coarser round)
		ori_root_size = ori_root_of_ori_root_size;
		int *temp = ori_root_of_ori_root;
		ori_root_of_ori_root = ori_root;
		ori_root = temp;
		ori_root_of_ori_root_size = ori_root_size / blockSize;
		ori_root_of_ori_root_size = (ori_root_of_ori_root_size + blockSize - 1) / blockSize;
		ori_root_of_ori_root_size *= blockSize;
		stride *= blockSize;
	} while (true);
	// Step 3 : Sort (Scatter)
	kernSetCompactCount << <dim3(1), dim3(1) >> > (n, dev_count, bools, indices);
	//checkCUDAError("kernSetCompactCount failed!");
	// Since scatter just discard elements who doesn't meet our criterion(bools value = 0)
	// However, we don't want discard pathSegments whoes remaining bounce is 0, we still its color info
	// after this iteration ends.
	// So, instead of scattering, we just sort here.
	// bools value == 1 will put ahead, and 0 behind.
	/*kernScatter << <gridDim, blockDim >> > (n, dev_data, dev_data, bools, indices);
	checkCUDAError("kernScatter failed!");*/
	thrust::device_ptr<int> dev_thrust_keys(bools);
	thrust::device_ptr<PathSegment> dev_thrust_values(dev_data);
	thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + n, dev_thrust_values, thrust::greater<int>());
	cudaMemcpy(&count, dev_count, sizeof(int), cudaMemcpyDeviceToHost);
	//checkCUDAError("cudaMemcpy failed!");
	cudaFree(bools);
	cudaFree(dev_count);
	cudaFree(indices);
	cudaFree(ori_root);
	cudaFree(ori_root_of_ori_root);
	return count;
}
}
}
|
08bb490475ec459c062b37b79ebfbad908c633e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
// Elementwise gradient of sin: dX[i] = dY[i] * cos(X[i]) for i in [0, N).
// CUDA_1D_KERNEL_LOOP is Caffe2's grid-stride loop macro.
template <typename T>
__global__ void
SinGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    // __ldg routes the read-only loads through the read-only data cache.
    dX[i] = __ldg(dY + i) * cos(__ldg(X + i));
#else
    dX[i] = dY[i] * cos(X[i]);
#endif
  }
}
} // namespace
// Computes dX = dY * cos(X) elementwise on the GPU.
// X_dims gives X's shape; dY is assumed to hold the same number of elements
// (its dims argument is deliberately ignored). Always returns true; launch
// errors surface through the CUDA context at the next sync point.
template <>
template <typename T>
bool SinGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& X_dims,
    const std::vector<int>& /* dY_dims */,
    const T* X,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  // Total element count = product of all dimensions.
  const int size = std::accumulate(
      X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
  hipLaunchKernelGGL(( SinGradientCUDAKernel<T>)
      , dim3(CAFFE_GET_BLOCKS(size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), size, dY, X, dX);
  return true;
}
// Register the GPU implementations of the Sin forward and gradient operators
// for float tensors.
REGISTER_CUDA_OPERATOR(
    Sin,
    UnaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SinFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    SinGradient,
    BinaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SinGradientFunctor<CUDAContext>>);
} // namespace caffe2
| 08bb490475ec459c062b37b79ebfbad908c633e4.cu | #include "caffe2/operators/sin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
SinGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * cos(__ldg(X + i));
#else
dX[i] = dY[i] * cos(X[i]);
#endif
}
}
} // namespace
// Computes dX = dY * cos(X) elementwise on the GPU.
// X_dims gives X's shape; dY is assumed to hold the same number of elements
// (its dims argument is deliberately ignored). Always returns true; launch
// errors surface through the CUDA context at the next sync point.
template <>
template <typename T>
bool SinGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& X_dims,
    const std::vector<int>& /* dY_dims */,
    const T* X,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  // Total element count = product of all dimensions.
  const int size = std::accumulate(
      X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
  SinGradientCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(size, dY, X, dX);
  return true;
}
// Register the GPU implementations of the Sin forward and gradient operators
// for float tensors.
REGISTER_CUDA_OPERATOR(
    Sin,
    UnaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SinFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    SinGradient,
    BinaryElementwiseOp<
        TensorTypes<float>,
        CUDAContext,
        SinGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
db70adbe37b01bc2740c55fee115125d3b843857.hip | // !!! This is a file automatically generated by hipify!!!
/* Author: Qin Ma <maqin@csbl.bmb.uga.edu>, Jan. 25, 2010
* Usage: This is part of the bicluster package. Use, redistribute, modify
* without limitations.
*
* Produces two graphs sequentially, derived from microarray data.
*
* The first graph is generated from the raw data where an edge is defined
* as two genes having common components from same condition, the score on
* the edge being the number of the same components. The edges in the first
* graph, are used as vertices in the second graph, where edges are defined
* as the common columns between two co-vertex edges in the first graph,
* with scores defined as the number of same columns for all three genes.
*
*/
#include "make_graph.h"
#include "utils.h"
#include <hip/hip_runtime.h>
#include <cuda-runtime.h>
/*we can reduce the HEAP_SIZE when the data contain so many genes so that memory is not enough*/
/**************************************************************************/
/* String intersection function without string copying, only numbers */
/*icaculate the weight of the edge in the first graph*/
// Counts positions where two discretized rows agree on a nonzero symbol
// (intended as the edge weight in the first graph).
// NOTE(review): dcommon_cnt is passed BY VALUE, so the count computed here
// never reaches the caller -- it almost certainly needs to be int* (or a
// per-thread output array). TODO confirm intended interface before use.
// NOTE(review): the loop bound is threadIdx.x, so each thread scans a
// different-length prefix of the rows, and the unconditional store after the
// barrier means only one thread's (arbitrary) count would survive anyway.
// The kernel looks unfinished/unported.
__global__ static void str_instersect_r(const discrete *s1, const discrete *s2, int dcommon_cnt, int dcols)
{
	int tx = threadIdx.x;
	int common_cnt = 0, i;
	for(i = 0; i <tx; ++i)
	{
		if(i < dcols)
		{
			if(*s1 == *s2 && (*s1!=0))
				common_cnt++;
			s1++;
			s2++;
		}
	}
	__syncthreads();
	// NOTE(review): dead store -- writes to a by-value parameter.
	dcommon_cnt = common_cnt;
}
// Intended to decrement the symbol-profile count for each column of seed row s.
// NOTE(review): dprofile is declared int*, but the body indexes it twice
// (dprofile[i][ss]) -- this cannot compile as written; the parameter was
// presumably meant to be a 2D table (e.g. int** or a flattened
// cols-by-symbols array). TODO confirm the intended layout.
// NOTE(review): the loop bound threadIdx.x makes each thread process a
// different prefix of the columns, which would cause racy repeated
// decrements of the same entries.
__global__ void seed_deduct(const discrete *s, int *dprofile, int dcols)
{
	int tx = threadIdx.x;
	int i;
	discrete ss;
	for(i = 0; i < tx; ++i)
	{
		if(i < dcols)
		{
			ss = s[i];
			dprofile[i][ss]--;
		}
	}
}
// Intended to increment the symbol-profile count for each column of seed row s.
// NOTE(review): the parameter is named dprofiles but the body references
// dprofile -- an undeclared identifier; and, as in seed_deduct, the double
// subscript on an int* cannot compile. This kernel looks unported from the
// original CPU code and needs its profile-table type and naming fixed.
__global__ void seed_update(const discrete *s, int *dprofiles, int dcols)
{
	int tx = threadIdx.x;
	int i;
	for(i = 0; i < tx; i++)
	{
		if(i < dcols)
			dprofile[i][s[i]]++;
	}
}
| db70adbe37b01bc2740c55fee115125d3b843857.cu | /* Author: Qin Ma <maqin@csbl.bmb.uga.edu>, Jan. 25, 2010
* Usage: This is part of the bicluster package. Use, redistribute, modify
* without limitations.
*
* Produces two graphs sequentially, derived from microarray data.
*
* The first graph is generated from the raw data where an edge is defined
* as two genes having common components from same condition, the score on
* the edge being the number of the same components. The edges in the first
* graph, are used as vertices in the second graph, where edges are defined
* as the common columns between two co-vertex edges in the first graph,
* with scores defined as the number of same columns for all three genes.
*
*/
#include "make_graph.h"
#include "utils.h"
#include <cuda.h>
#include <cuda-runtime.h>
/*we can reduce the HEAP_SIZE when the data contain so many genes so that memory is not enough*/
/**************************************************************************/
/* String intersection function without string copying, only numbers */
/*icaculate the weight of the edge in the first graph*/
// Counts positions where two discretized rows agree on a nonzero symbol
// (intended as the edge weight in the first graph).
// NOTE(review): dcommon_cnt is passed BY VALUE, so the count computed here
// never reaches the caller -- it almost certainly needs to be int* (or a
// per-thread output array). TODO confirm intended interface before use.
// NOTE(review): the loop bound is threadIdx.x, so each thread scans a
// different-length prefix of the rows, and the unconditional store after the
// barrier means only one thread's (arbitrary) count would survive anyway.
// The kernel looks unfinished/unported.
__global__ static void str_instersect_r(const discrete *s1, const discrete *s2, int dcommon_cnt, int dcols)
{
	int tx = threadIdx.x;
	int common_cnt = 0, i;
	for(i = 0; i <tx; ++i)
	{
		if(i < dcols)
		{
			if(*s1 == *s2 && (*s1!=0))
				common_cnt++;
			s1++;
			s2++;
		}
	}
	__syncthreads();
	// NOTE(review): dead store -- writes to a by-value parameter.
	dcommon_cnt = common_cnt;
}
// Intended to decrement the symbol-profile count for each column of seed row s.
// NOTE(review): dprofile is declared int*, but the body indexes it twice
// (dprofile[i][ss]) -- this cannot compile as written; the parameter was
// presumably meant to be a 2D table (e.g. int** or a flattened
// cols-by-symbols array). TODO confirm the intended layout.
// NOTE(review): the loop bound threadIdx.x makes each thread process a
// different prefix of the columns, which would cause racy repeated
// decrements of the same entries.
__global__ void seed_deduct(const discrete *s, int *dprofile, int dcols)
{
	int tx = threadIdx.x;
	int i;
	discrete ss;
	for(i = 0; i < tx; ++i)
	{
		if(i < dcols)
		{
			ss = s[i];
			dprofile[i][ss]--;
		}
	}
}
// Intended to increment the symbol-profile count for each column of seed row s.
// NOTE(review): the parameter is named dprofiles but the body references
// dprofile -- an undeclared identifier; and, as in seed_deduct, the double
// subscript on an int* cannot compile. This kernel looks unported from the
// original CPU code and needs its profile-table type and naming fixed.
__global__ void seed_update(const discrete *s, int *dprofiles, int dcols)
{
	int tx = threadIdx.x;
	int i;
	for(i = 0; i < tx; i++)
	{
		if(i < dcols)
			dprofile[i][s[i]]++;
	}
}
|
5f59151c2e9561efece56db34d053cf7df222f56.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
  // Poll the sticky error flag; this also clears it.
  hipError_t err = hipGetLastError();
  if (err == hipSuccess) {
    return; // nothing went wrong
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
/*! Mass of one "planet." */
#define planetMass 3e8f
/*! Mass of the "star" at the center. */
#define starMass 5e10f
/*! Size of the starting area in simulation space. */
const float scene_scale = 1e2;
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
glm::vec3 *dev_pos;
glm::vec3 *dev_vel;
glm::vec3 *dev_acc;
/******************
* initSimulation *
******************/
// Integer bit-mixing hash (appears to follow the well-known Wang/Jenkins
// add/xor/shift recipe -- TODO confirm provenance). Used below to turn a
// (particle index * time) product into an uncorrelated RNG seed.
__host__ __device__ unsigned int hash(unsigned int a) {
  a = (a + 0x7ed55d16) + (a << 12);
  a = (a ^ 0xc761c23c) ^ (a >> 19);
  a = (a + 0x165667b1) + (a << 5);
  a = (a + 0xd3a2646c) ^ (a << 9);
  a = (a + 0xfd7046c5) + (a << 3);
  a = (a ^ 0xb55a4f09) ^ (a >> 16);
  return a;
}
/**
 * Function for generating a random vec3.
 * Components are drawn i.i.d. uniform from [-1, 1]; the RNG is seeded from
 * hash((int)(index * time)) so each (particle, time) pair gets its own stream.
 * NOTE(review): the seed collapses to 0 whenever time == 0 and truncates for
 * fractional products, making streams collide -- verify callers always pass
 * distinct nonzero time values.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
 * CUDA kernel for generating planets with a specified mass randomly around the star.
 * One thread per planet: x and y are uniform in [-scale, scale]; z is squashed
 * toward the disc plane in proportion to the xy radius.
 * (mass is currently unused -- kept for interface compatibility.)
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale, float mass) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index < N) {
    glm::vec3 rand = generateRandomVec3(time, index);
    arr[index].x = scale * rand.x;
    arr[index].y = scale * rand.y;
    // Single-precision literal/intrinsic: the original `0.1` and `sqrt`
    // silently promoted this expression to double, which is needlessly slow
    // on GPUs; the result is only used as a float anyway.
    arr[index].z = 0.1f * scale * sqrtf(rand.x * rand.x + rand.y * rand.y) * rand.z;
  }
}
/**
* CUDA kernel for generating velocities in a vortex around the origin.
* This is just to make for an interesting-looking scene.
*/
/**
 * Give each body a tangential (vortex) starting velocity around the origin.
 * The speed s = sqrt(G * starMass / r) is the circular-orbit speed for the
 * star's gravity at radius r; the direction D is perpendicular to the
 * radius vector within the z = 0 plane.
 * G and EPSILON are not defined in this file -- assumed to come from
 * kernel.h; confirm.
 */
__global__ void kernGenerateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec3 * pos) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z);
// EPSILON keeps r strictly positive so the divisions below cannot blow up.
float r = glm::length(R) + EPSILON;
float s = sqrt(G * starMass / r);
glm::vec3 D = glm::normalize(glm::cross(R / r, glm::vec3(0, 0, 1)));
arr[index].x = s * D.x;
arr[index].y = s * D.y;
arr[index].z = s * D.z;
}
}
/**
* Initialize memory, update some globals
*/
/**
 * Allocate the device buffers for N bodies and initialize positions and
 * velocities on the GPU. Must run before stepSimulation/copyPlanetsToVBO.
 * Sets the module-global numObjects/dev_pos/dev_vel/dev_acc.
 */
void Nbody::initSimulation(int N) {
numObjects = N;
// Ceiling division so every body gets a thread even when N % blockSize != 0.
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel failed!");
hipMalloc((void**)&dev_acc, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_acc failed!");
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale, planetMass);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
hipLaunchKernelGGL(( kernGenerateCircularVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, dev_pos);
checkCUDAErrorWithLine("kernGenerateCircularVelArray failed!");
// Block until the init kernels finish before the caller proceeds.
hipDeviceSynchronize();
}
/******************
* copyPlanetsToVBO *
******************/
/**
* Copy the planet positions into the VBO so that they can be drawn by OpenGL.
*/
/**
 * Copy body positions into an interleaved xyzw VBO (4 floats per body) so
 * OpenGL can draw them. Positions are scaled by -1/s_scale, i.e. mapped to
 * roughly [-1, 1] with the sign flipped; w is set to 1.
 */
__global__ void kernCopyPlanetsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
    const int i = threadIdx.x + (blockIdx.x * blockDim.x);
    const float c_scale = -1.0f / s_scale;
    if (i >= N) {
        return;
    }
    float *out = vbo + 4 * i;
    out[0] = c_scale * pos[i].x;
    out[1] = c_scale * pos[i].y;
    out[2] = c_scale * pos[i].z;
    out[3] = 1;  // homogeneous w component
}
/**
* Wrapper for call to the kernCopyPlanetsToVBO CUDA kernel.
*/
/**
 * Launch kernCopyPlanetsToVBO to fill the VBO from dev_pos, then block so
 * the data is complete before OpenGL reads it.
 * `vbodptr` must be a device pointer with room for 4 * numObjects floats.
 */
void Nbody::copyPlanetsToVBO(float *vbodptr) {
dim3 fullBlocksPerGrid((int)ceil(float(numObjects) / float(blockSize)));
hipLaunchKernelGGL(( kernCopyPlanetsToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, vbodptr, scene_scale);
checkCUDAErrorWithLine("copyPlanetsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* Compute the acceleration on the body with index `iSelf` due to the `N`
* bodies in the array `pos`.
*/
/**
 * Total gravitational acceleration on body `iSelf`: central star plus a
 * brute-force O(N) sum over all other bodies.
 * NOTE(review): the softening term in both denominators is sqrt(EPSILON),
 * while the commented-out remnants suggest plain EPSILON may have been
 * intended -- confirm which softening is desired.
 * Intermediate math is carried out in double even though positions are
 * single-precision floats.
 */
__device__ glm::vec3 accelerate(int N, int iSelf, const glm::vec3 *pos) {
// TODO: Compute the acceleration on `my_pos` due to:
glm::vec3 origin = glm::vec3(0, 0, 0);
glm::vec3 my_pos = glm::vec3(pos[iSelf].x, pos[iSelf].y, pos[iSelf].z);
//*********star*************
glm::vec3 star_r = origin - my_pos;
double star_l = glm::length(star_r);
// NOTE(review): normalize() of a zero vector yields NaNs if a body sits
// exactly at the origin -- presumably excluded by initialization; verify.
glm::vec3 star_n = glm::normalize(star_r);
double star_g = G*starMass / (star_l*star_l + sqrt(EPSILON));//+ EPSILON);
glm::vec3 star_gdir = glm::vec3(star_g*star_n.x, star_g*star_n.y, star_g*star_n.z);
glm::vec3 planet_gdir(0, 0, 0);
// Accumulate the contribution of every other body (self excluded).
for (int i = 0; i < N; i++) {
if (i != iSelf){
glm::vec3 other_pos = glm::vec3(pos[i].x, pos[i].y, pos[i].z);
glm::vec3 planet_r = other_pos - my_pos;
glm::vec3 planet_n = glm::normalize(planet_r);
double planet_l = glm::length(planet_r);
double planet_g = G*planetMass / (planet_l*planet_l + sqrt(EPSILON));// EPSILON);
glm::vec3 temp = glm::vec3(planet_g*planet_n.x, planet_g*planet_n.y, planet_g*planet_n.z);
planet_gdir += temp;
}
}
glm::vec3 result = star_gdir + planet_gdir;
return result;
}
/**
* For each of the `N` bodies, update its acceleration.
* Compute the total instantaneous acceleration using `accelerate`, then store that into `acc`.
*/
/**
 * Per-body acceleration update: thread `index` computes accelerate() and
 * writes exactly one element of `acc`, so there are no write races.
 * `dt` is unused here; it is kept for a uniform kernel signature.
 */
__global__ void kernUpdateAcc(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) {
// TODO: implement kernUpdateAcc.
// This function body runs once on each CUDA thread.
// To avoid race conditions, each instance should only write ONE value to `acc`!
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N){
acc[index] = accelerate(N, index, pos);
}
}
/**
* For each of the `N` bodies, update its velocity, then update its position, using a
* simple Euler integration scheme. Acceleration must be updated before calling this kernel.
*/
/**
 * Semi-implicit Euler step for one body per thread: the velocity is updated
 * from the precomputed acceleration first, and the position then advances
 * using that updated velocity. kernUpdateAcc must run before this kernel.
 */
__global__ void kernUpdateVelPos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel, const glm::vec3 *acc) {
    const int i = threadIdx.x + (blockIdx.x * blockDim.x);
    if (i >= N) {
        return;
    }
    glm::vec3 v = vel[i] + acc[i] * dt;
    vel[i] = v;
    pos[i] += v * dt;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
/**
 * Advance the whole simulation by dt seconds: recompute accelerations from
 * the current positions, then integrate velocities and positions
 * (semi-implicit Euler). Launches are asynchronous; no sync is done here.
 */
void Nbody::stepSimulation(float dt) {
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    // Use the same launch macro and post-launch error checks as the rest of
    // this hipified file instead of unchecked raw triple-chevron launches.
    hipLaunchKernelGGL(( kernUpdateAcc), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_acc);
    checkCUDAErrorWithLine("kernUpdateAcc failed!");
    hipLaunchKernelGGL(( kernUpdateVelPos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc);
    checkCUDAErrorWithLine("kernUpdateVelPos failed!");
}
/**
 * Release the device buffers allocated by initSimulation.
 * The pointers are not nulled, so this is presumably called exactly once at
 * shutdown -- confirm callers never invoke it twice.
 */
void Nbody::endSimulation() {
hipFree(dev_acc);
hipFree(dev_vel);
hipFree(dev_pos);
}
| 5f59151c2e9561efece56db34d053cf7df222f56.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
/**
 * Print the pending CUDA error (if any) with an optional line prefix and
 * exit the process. A negative `line` (the default) hides the prefix;
 * the checkCUDAErrorWithLine macro passes __LINE__.
 */
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
/*! Mass of one "planet." */
#define planetMass 3e8f
/*! Mass of the "star" at the center. */
#define starMass 5e10f
/*! Size of the starting area in simulation space. */
const float scene_scale = 1e2;
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
glm::vec3 *dev_pos;
glm::vec3 *dev_vel;
glm::vec3 *dev_acc;
/******************
* initSimulation *
******************/
/**
 * 32-bit integer mixing hash: six add/xor/shift rounds spread each input
 * bit across the whole result. Deterministic; callable from host and
 * device. Used to seed the per-thread RNG in generateRandomVec3.
 */
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* Function for generating a random vec3.
*/
/**
 * Generate a pseudo-random vec3 with each component uniform in [-1, 1];
 * deterministic for a given (time, index) pair.
 * NOTE(review): thrust/random is used with no visible #include -- presumably
 * provided by kernel.h; confirm.
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* CUDA kernel for generating planets with a specified mass randomly around the star.
*/
/**
 * Scatter N bodies randomly around the origin, one thread per body.
 * x/y land in [-scale, scale]; z is squashed toward the plane and grows
 * with distance from the center. `mass` is unused.
 * NOTE(review): `0.1` and `sqrt` promote the z expression to double inside
 * a float kernel -- consider 0.1f / sqrtf.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale, float mass) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = 0.1 * scale * sqrt(rand.x * rand.x + rand.y * rand.y) * rand.z;
}
}
/**
* CUDA kernel for generating velocities in a vortex around the origin.
* This is just to make for an interesting-looking scene.
*/
/**
 * Give each body a tangential (vortex) starting velocity around the origin:
 * s = sqrt(G * starMass / r) is the circular-orbit speed at radius r, and D
 * is perpendicular to the radius within the z = 0 plane.
 * G and EPSILON are assumed to come from kernel.h -- confirm.
 */
__global__ void kernGenerateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec3 * pos) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z);
// EPSILON keeps r strictly positive so the divisions below cannot blow up.
float r = glm::length(R) + EPSILON;
float s = sqrt(G * starMass / r);
glm::vec3 D = glm::normalize(glm::cross(R / r, glm::vec3(0, 0, 1)));
arr[index].x = s * D.x;
arr[index].y = s * D.y;
arr[index].z = s * D.z;
}
}
/**
* Initialize memory, update some globals
*/
/**
 * Allocate the device buffers for N bodies and initialize positions and
 * velocities on the GPU. Must run before stepSimulation/copyPlanetsToVBO.
 * Sets the module-global numObjects/dev_pos/dev_vel/dev_acc.
 */
void Nbody::initSimulation(int N) {
    numObjects = N;
    // Ceiling division so every body gets a thread when N % blockSize != 0.
    dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
    cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
    cudaMalloc((void**)&dev_vel, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_vel failed!");
    cudaMalloc((void**)&dev_acc, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_acc failed!");
    kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale, planetMass);
    checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
    kernGenerateCircularVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, dev_pos);
    checkCUDAErrorWithLine("kernGenerateCircularVelArray failed!");
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and blocks until the init kernels complete.
    cudaDeviceSynchronize();
}
/******************
* copyPlanetsToVBO *
******************/
/**
* Copy the planet positions into the VBO so that they can be drawn by OpenGL.
*/
/**
 * Copy body positions into an interleaved xyzw VBO (4 floats per body) for
 * OpenGL. Positions are scaled by -1/s_scale (sign flipped, mapped to
 * roughly [-1, 1]); w is set to 1.
 */
__global__ void kernCopyPlanetsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1;
}
}
/**
* Wrapper for call to the kernCopyPlanetsToVBO CUDA kernel.
*/
/**
 * Launch kernCopyPlanetsToVBO to fill the VBO from dev_pos, then block so
 * the data is complete before OpenGL reads it.
 * `vbodptr` must be a device pointer with room for 4 * numObjects floats.
 */
void Nbody::copyPlanetsToVBO(float *vbodptr) {
    dim3 fullBlocksPerGrid((int)ceil(float(numObjects) / float(blockSize)));
    kernCopyPlanetsToVBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, vbodptr, scene_scale);
    checkCUDAErrorWithLine("copyPlanetsToVBO failed!");
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* Compute the acceleration on the body with index `iSelf` due to the `N`
* bodies in the array `pos`.
*/
/**
 * Total gravitational acceleration on body `iSelf`: central star plus a
 * brute-force O(N) sum over all other bodies.
 * NOTE(review): the softening term in both denominators is sqrt(EPSILON),
 * while the commented-out remnants suggest plain EPSILON may have been
 * intended -- confirm which softening is desired.
 * NOTE(review): normalize() of a zero vector yields NaNs if a body sits
 * exactly at the origin -- presumably excluded by initialization; verify.
 */
__device__ glm::vec3 accelerate(int N, int iSelf, const glm::vec3 *pos) {
// TODO: Compute the acceleration on `my_pos` due to:
glm::vec3 origin = glm::vec3(0, 0, 0);
glm::vec3 my_pos = glm::vec3(pos[iSelf].x, pos[iSelf].y, pos[iSelf].z);
//*********star*************
glm::vec3 star_r = origin - my_pos;
double star_l = glm::length(star_r);
glm::vec3 star_n = glm::normalize(star_r);
double star_g = G*starMass / (star_l*star_l + sqrt(EPSILON));//+ EPSILON);
glm::vec3 star_gdir = glm::vec3(star_g*star_n.x, star_g*star_n.y, star_g*star_n.z);
glm::vec3 planet_gdir(0, 0, 0);
// Accumulate the contribution of every other body (self excluded).
for (int i = 0; i < N; i++) {
if (i != iSelf){
glm::vec3 other_pos = glm::vec3(pos[i].x, pos[i].y, pos[i].z);
glm::vec3 planet_r = other_pos - my_pos;
glm::vec3 planet_n = glm::normalize(planet_r);
double planet_l = glm::length(planet_r);
double planet_g = G*planetMass / (planet_l*planet_l + sqrt(EPSILON));// EPSILON);
glm::vec3 temp = glm::vec3(planet_g*planet_n.x, planet_g*planet_n.y, planet_g*planet_n.z);
planet_gdir += temp;
}
}
glm::vec3 result = star_gdir + planet_gdir;
return result;
}
/**
* For each of the `N` bodies, update its acceleration.
* Compute the total instantaneous acceleration using `accelerate`, then store that into `acc`.
*/
/**
 * Per-body acceleration update: thread `index` computes accelerate() and
 * writes exactly one element of `acc`, so there are no write races.
 * `dt` is unused here; it is kept for a uniform kernel signature.
 */
__global__ void kernUpdateAcc(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) {
// TODO: implement kernUpdateAcc.
// This function body runs once on each CUDA thread.
// To avoid race conditions, each instance should only write ONE value to `acc`!
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N){
acc[index] = accelerate(N, index, pos);
}
}
/**
* For each of the `N` bodies, update its velocity, then update its position, using a
* simple Euler integration scheme. Acceleration must be updated before calling this kernel.
*/
/**
 * Semi-implicit Euler step, one body per thread: the velocity is updated
 * first, then the position advances with the updated velocity.
 * kernUpdateAcc must run before this kernel.
 */
__global__ void kernUpdateVelPos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel, const glm::vec3 *acc) {
// TODO: implement kernUpdateVelPos.
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N){
vel[index] += acc[index] * dt;
pos[index] += vel[index] * dt;
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
/**
 * Advance the whole simulation by dt seconds: recompute accelerations from
 * the current positions, then integrate velocities and positions
 * (semi-implicit Euler). Launches are asynchronous; no sync is done here.
 */
void Nbody::stepSimulation(float dt) {
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    kernUpdateAcc<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_acc);
    // Launch-configuration errors only surface via cudaGetLastError();
    // check after each launch, matching the style used in initSimulation.
    checkCUDAErrorWithLine("kernUpdateAcc failed!");
    kernUpdateVelPos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc);
    checkCUDAErrorWithLine("kernUpdateVelPos failed!");
}
/**
 * Release the device buffers allocated by initSimulation.
 * The pointers are not nulled, so this is presumably called exactly once at
 * shutdown -- confirm callers never invoke it twice.
 */
void Nbody::endSimulation() {
cudaFree(dev_acc);
cudaFree(dev_vel);
cudaFree(dev_pos);
}
|
e360a778396a6593fc8f2fe839a66a0755291372.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zsymmetrize.cu normal z -> c, Tue Feb 9 16:05:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
/*
    One thread owns one row i (NB threads per block). For column-major
    storage with leading dimension ldda, element (r,c) lives at
    dA[r + c*ldda]. Each strictly-lower entry (i,j), j < i, is conjugated
    and copied to the mirrored upper entry (j,i); the diagonal is untouched.
*/
__global__ void
csymmetrize_lower( int m, magmaFloatComplex *dA, int ldda )
{
    int i = blockIdx.x*NB + threadIdx.x;
    if ( i < m ) {
        for (int j = 0; j < i; ++j) {
            dA[j + i*ldda] = MAGMA_C_CONJ( dA[i + j*ldda] );  // upper := conj(lower)
        }
    }
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
csymmetrize_upper( int m, magmaFloatComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_C_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
CSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
/* Launch wrapper: validates arguments, then mirrors the valid triangle of
   the m x m matrix dA into the other triangle (see csymmetrize_lower/upper)
   on the stream of `queue`. Reports argument errors via magma_xerbla. */
extern "C" void
magmablas_csymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; // invalid argument: reported, nothing launched
}
if ( m == 0 )
return; // empty matrix: nothing to do
// One thread per row, NB threads per block.
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( csymmetrize_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
else {
hipLaunchKernelGGL(( csymmetrize_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
}
| e360a778396a6593fc8f2fe839a66a0755291372.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zsymmetrize.cu normal z -> c, Tue Feb 9 16:05:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
/* One thread owns row i. In column-major storage, dA walks across row i
   through the strictly-lower entries (i,j) at dA[i + j*ldda], while dAT
   walks down column i through the mirrored upper entries (j,i). Copies
   conj(lower) into upper; the diagonal is untouched. */
__global__ void
csymmetrize_lower( int m, magmaFloatComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda; // stop after i elements (columns 0..i-1)
while( dA < dAend ) {
*dAT = MAGMA_C_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
csymmetrize_upper( int m, magmaFloatComplex *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
magmaFloatComplex *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
magmaFloatComplex *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_C_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
CSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
/* Launch wrapper: validates arguments, then mirrors the valid triangle of
   the m x m matrix dA into the other triangle (see csymmetrize_lower/upper)
   on the stream of `queue`. Reports argument errors via magma_xerbla. */
extern "C" void
magmablas_csymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; // invalid argument: reported, nothing launched
}
if ( m == 0 )
return; // empty matrix: nothing to do
// One thread per row, NB threads per block.
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
csymmetrize_upper<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
else {
csymmetrize_lower<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
}
|
6a7e37730b3870bee09633cc9ba0f2dc4b2c661c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_align_layer.hpp"
using std::max;
using std::min;
namespace caffe {
/**
 * ROIAlign forward, bilinear flavor. One thread per output element
 * (n, c, ph, pw). For each pooled bin it evaluates `interpolate_times`
 * sample points (1 = bin center, 4 = quarter-offset points), bilinearly
 * interpolates the feature map at each, and keeps the maximum sample.
 * The 4 source indices and bilinear weights of the winning sample are
 * written to argmax_data / w_data (4 entries per output element) for use
 * by the backward pass.
 * ROI layout in bottom_rois: (batch_idx, x1, y1, x2, y2) in input-image
 * coordinates; the box is padded by pad_ratio, scaled by spatial_scale,
 * and clipped to the image before pooling.
 */
template <typename Dtype>
__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype pad_ratio, const Dtype* bottom_rois, const int interpolate_times, Dtype* top_data, int* argmax_data, Dtype* w_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5; // 5 values per ROI: (batch, x1, y1, x2, y2)
int roi_batch_ind = bottom_rois[0];
// padding
Dtype pad_w, pad_h;
pad_w = (bottom_rois[3] - bottom_rois[1] + 1)*pad_ratio;
pad_h = (bottom_rois[4] - bottom_rois[2] + 1)*pad_ratio;
Dtype roi_start_w = (bottom_rois[1] - pad_w) * spatial_scale;
Dtype roi_start_h = (bottom_rois[2] - pad_h) * spatial_scale;
Dtype roi_end_w = (bottom_rois[3] + pad_w) * spatial_scale;
Dtype roi_end_h = (bottom_rois[4] + pad_h) * spatial_scale;
// clipping
roi_start_w = max(roi_start_w, Dtype(0)); roi_start_h = max(roi_start_h, Dtype(0));
int img_width = round(width / spatial_scale);
int img_height = round(height / spatial_scale);
roi_end_w = min(Dtype(img_width - 1), roi_end_w);
roi_end_h = min(Dtype(img_height - 1), roi_end_h);
// Degenerate ROIs are forced to at least 1x1.
Dtype roi_height = max(roi_end_h - roi_start_h + 1, Dtype(1));
Dtype roi_width = max(roi_end_w - roi_start_w + 1, Dtype(1));
const Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
const Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
// Jump to the (image, channel) plane this thread reads from.
bottom_data += (roi_batch_ind * channels + c) * height * width;
// NOTE(review): source indices are staged in float temporaries before the
// int store below -- exact only while offsets fit in 24 bits; verify for
// very large feature maps.
float argmax_temp_data[4];
float w_temp_data[4];
float start_x = 0.25, start_y = 0.25;
if (interpolate_times == 1) {
start_x = 0.5;
start_y = 0.5;
}
Dtype dfValue = 0, maxValue = 0;
for (int inter_index = 0; inter_index < interpolate_times; ++inter_index) {
int index_x = inter_index / 2;
int index_y = inter_index % 2;
Dtype off_x = index_x * 0.5 + start_x;
Dtype off_y = index_y * 0.5 + start_y;
// Sample location inside the bin, clamped to the feature map.
Dtype hcenter = static_cast<Dtype>(ph + off_x)* bin_size_h;
Dtype wcenter = static_cast<Dtype>(pw + off_y)* bin_size_w;
hcenter = min(max(hcenter + roi_start_h, Dtype(0)), Dtype(height - 1));
wcenter = min(max(wcenter + roi_start_w, Dtype(0)), Dtype(width - 1));
// Integer corners of the 2x2 bilinear neighborhood.
int hstart = min(max(hcenter, Dtype(0)), Dtype(height - 1));
int wstart = min(max(wcenter, Dtype(0)), Dtype(width - 1));
int hend = min(max(hstart + 1, 0), height - 1);
int wend = min(max(wstart + 1, 0), width - 1);
Dtype fX0 = wcenter - wstart;
Dtype fX1 = wend - wcenter;
Dtype fY0 = hcenter - hstart;
Dtype fY1 = hend - hcenter;
Dtype fFactorA = fY1 * fX1;
Dtype fFactorB = fY1 * fX0;
Dtype fFactorC = fY0 * fX1;
Dtype fFactorD = fY0 * fX0;
dfValue = bottom_data[hstart * width + wstart] * fFactorA
+ bottom_data[hstart * width + wend] * fFactorB
+ bottom_data[hend * width + wstart] * fFactorC
+ bottom_data[hend * width + wend] * fFactorD;
if (inter_index == 0) {
maxValue = dfValue - 1; // seed below the first sample so it always wins
}
argmax_temp_data[0] = hstart * width + wstart;
argmax_temp_data[1] = hstart * width + wend;
argmax_temp_data[2] = hend * width + wstart;
argmax_temp_data[3] = hend * width + wend;
w_temp_data[0] = fFactorA;
w_temp_data[1] = fFactorB;
w_temp_data[2] = fFactorC;
w_temp_data[3] = fFactorD;
// Keep the best sample seen so far and record its indices/weights.
if (dfValue > maxValue || inter_index == 0) {
maxValue = dfValue;
top_data[index] = dfValue;
for (int s = 0; s < 4; ++s) {
w_data[4 * index + s] = w_temp_data[s];
argmax_data[4 * index + s] = argmax_temp_data[s];
}
}
}
}
}
/**
 * ROIAlign forward, bicubic flavor. Same sampling scheme as
 * ROIAlignForward (1 or 4 sample points per bin, max over samples), but
 * each sample is a 4x4 bicubic interpolation with weights from
 * cubic_coeff_gpu (defined elsewhere -- confirm it is a __device__
 * function in scope). Stores 16 source indices and weights per output
 * element into argmax_data / w_data for the backward pass.
 */
template <typename Dtype>
__global__ void ROICubicForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype pad_ratio, const Dtype* bottom_rois, const int interpolate_times, Dtype* top_data, int* argmax_data, Dtype* w_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5; // 5 values per ROI: (batch, x1, y1, x2, y2)
int roi_batch_ind = bottom_rois[0];
// padding
Dtype pad_w, pad_h;
pad_w = (bottom_rois[3] - bottom_rois[1] + 1)*pad_ratio;
pad_h = (bottom_rois[4] - bottom_rois[2] + 1)*pad_ratio;
Dtype roi_start_w = (bottom_rois[1] - pad_w) * spatial_scale;
Dtype roi_start_h = (bottom_rois[2] - pad_h) * spatial_scale;
Dtype roi_end_w = (bottom_rois[3] + pad_w) * spatial_scale;
Dtype roi_end_h = (bottom_rois[4] + pad_h) * spatial_scale;
// clipping
roi_start_w = max(roi_start_w, Dtype(0)); roi_start_h = max(roi_start_h, Dtype(0));
int img_width = round(width / spatial_scale);
int img_height = round(height / spatial_scale);
roi_end_w = min(Dtype(img_width - 1), roi_end_w);
roi_end_h = min(Dtype(img_height - 1), roi_end_h);
// Degenerate ROIs are forced to at least 1x1.
Dtype roi_height = max(roi_end_h - roi_start_h + 1, Dtype(1));
Dtype roi_width = max(roi_end_w - roi_start_w + 1, Dtype(1));
const Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
const Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
// Jump to the (image, channel) plane this thread reads from.
bottom_data += (roi_batch_ind * channels + c) * height * width;
float argmax_temp_data[16];
float w_temp_data[16];
float start_x = 0.25, start_y = 0.25;
if (interpolate_times == 1) {
start_x = 0.5;
start_y = 0.5;
}
Dtype dfCubicValue = 0, maxValue = 0;
for (int inter_index = 0; inter_index < interpolate_times; ++inter_index) {
int index_x = inter_index / 2;
int index_y = inter_index % 2;
Dtype off_x = index_x * 0.5 + start_x;
Dtype off_y = index_y * 0.5 + start_y;
// Sample location inside the bin, clamped to the feature map.
Dtype hcenter = static_cast<Dtype>(ph + off_x)* bin_size_h;
Dtype wcenter = static_cast<Dtype>(pw + off_y)* bin_size_w;
hcenter = min(max(hcenter + roi_start_h, Dtype(0)), Dtype(height - 1));
wcenter = min(max(wcenter + roi_start_w, Dtype(0)), Dtype(width - 1));
int i = wcenter;
int j = hcenter;
/*get adjacent 16 values*/
float values[4][4];
int temp_c, temp_r;
for (int r = j - 1, s = 0; r <= j + 2; r++, s++){
for (int c = i - 1, t = 0; c <= i + 2; c++, t++){
// Clamp the 4x4 neighborhood to the feature-map border (replicate padding).
temp_c = min(max(Dtype(c), Dtype(0)), Dtype(width - 1));
temp_r = min(max(Dtype(r), Dtype(0)), Dtype(height - 1));
values[s][t] = bottom_data[temp_r*width + temp_c];
argmax_temp_data[s * 4 + t] = temp_r*width + temp_c;
}
}
/*calc the coeff*/
// Fractional offsets within the top-left cell; A = column weights,
// C = row weights from the cubic kernel.
float u = wcenter - i;
float v = hcenter - j;
float A[4], C[4];
for (int distance = 1, s = 0; distance >= -2; distance--, s++){
A[s] = cubic_coeff_gpu(u + distance);
C[s] = cubic_coeff_gpu(v + distance);
}
dfCubicValue = 0;
for (int s = 0; s < 4; s++) {
for (int t = 0; t < 4; t++) {
dfCubicValue += values[s][t] * A[t] * C[s];
w_temp_data[s * 4 + t] = A[t] * C[s];
}
}
// Keep the best sample seen so far and record its indices/weights.
if (dfCubicValue > maxValue || inter_index == 0) {
maxValue = dfCubicValue;
top_data[index] = dfCubicValue;
for (int s = 0; s < 16; ++s) {
w_data[16 * index + s] = w_temp_data[s];
argmax_data[16 * index + s] = argmax_temp_data[s];
}
}
}
}
}
/**
 * Forward pass: pools every ROI in bottom[1] from the feature map bottom[0]
 * into a fixed pooled grid, using the bicubic kernel when bi_type ==
 * BiCubic and the bilinear kernel otherwise. Also fills bili_idx (source
 * indices) and bili_w (interpolation weights), which Backward_gpu uses to
 * redistribute gradients.
 */
template <typename Dtype>
void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = bili_idx.mutable_gpu_data();
Dtype* w_data = bili_w.mutable_gpu_data();
int count = top[0]->count();
// 4 sample points per bin when multi-interpolate is enabled, else 1 (bin center).
int interpolate_times = is_multi_interpolate ? 4 : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
if (bi_type == BiCubic) {
ROICubicForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, pad_ratio_, bottom_rois, interpolate_times, top_data, argmax_data, w_data);
}
else {
ROIAlignForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, pad_ratio_, bottom_rois, interpolate_times, top_data, argmax_data, w_data);
}
CUDA_POST_KERNEL_CHECK;
}
/**
 * Gradient scatter: one thread per top element adds top_diff[index] times
 * each stored weight into the w_num (4 bilinear / 16 bicubic) recorded
 * bottom locations.
 * NOTE(review): the `+=` on bottom_diff is not atomic, and different top
 * elements (overlapping ROIs, adjacent bins) can record the same bottom
 * offset -- confirm whether this race is acceptable or atomicAdd is needed.
 */
template <typename Dtype>
__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const Dtype* w_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int w_num, const Dtype pad_ratio,
Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
//int pw = index % pooled_width;
//int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
for (int i = 0; i < w_num; ++i) {
// Negative indices mark unused slots and are skipped.
if (argmax_data[w_num * index + i] >= 0) {
int offset_bottom = (roi_batch_ind * channels + c) * height
* width + argmax_data[w_num * index + i];
bottom_diff[offset_bottom] += top_diff[index] * w_data[w_num * index + i];
}
}
}
}
/**
 * Backward pass: zero the bottom gradient, then scatter each top gradient
 * back to the 4 (bilinear) or 16 (bicubic) source locations recorded by the
 * forward pass, scaled by the stored interpolation weights. No gradient is
 * propagated to the ROI blob.
 */
template <typename Dtype>
void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// Gradients are accumulated with +=, so start from a clean buffer.
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = bili_idx.gpu_data();
const Dtype* w_data = bili_w.gpu_data();
const int top_count = top[0]->count();
int w_num = 4;
if (bi_type == BiCubic) {
w_num = 16;
}
// NOLINT_NEXT_LINE(whitespace/operators)
ROIAlignBackward<Dtype> << <CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS >> >(
top_count, top_diff, argmax_data, w_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, w_num, pad_ratio_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer);
} // namespace caffe
| 6a7e37730b3870bee09633cc9ba0f2dc4b2c661c.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_align_layer.hpp"
using std::max;
using std::min;
namespace caffe {
/**
 * ROIAlign forward, bilinear flavor (CUDA original of the HIP port above).
 * One thread per output element (n, c, ph, pw): evaluates
 * `interpolate_times` sample points per pooled bin (1 = bin center,
 * 4 = quarter-offset points), bilinearly interpolates the feature map at
 * each, keeps the maximum sample, and records its 4 source indices and
 * weights in argmax_data / w_data for the backward pass.
 * ROI layout in bottom_rois: (batch_idx, x1, y1, x2, y2), padded by
 * pad_ratio, scaled by spatial_scale, clipped to the image.
 */
template <typename Dtype>
__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype pad_ratio, const Dtype* bottom_rois, const int interpolate_times, Dtype* top_data, int* argmax_data, Dtype* w_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5; // 5 values per ROI: (batch, x1, y1, x2, y2)
int roi_batch_ind = bottom_rois[0];
// padding
Dtype pad_w, pad_h;
pad_w = (bottom_rois[3] - bottom_rois[1] + 1)*pad_ratio;
pad_h = (bottom_rois[4] - bottom_rois[2] + 1)*pad_ratio;
Dtype roi_start_w = (bottom_rois[1] - pad_w) * spatial_scale;
Dtype roi_start_h = (bottom_rois[2] - pad_h) * spatial_scale;
Dtype roi_end_w = (bottom_rois[3] + pad_w) * spatial_scale;
Dtype roi_end_h = (bottom_rois[4] + pad_h) * spatial_scale;
// clipping
roi_start_w = max(roi_start_w, Dtype(0)); roi_start_h = max(roi_start_h, Dtype(0));
int img_width = round(width / spatial_scale);
int img_height = round(height / spatial_scale);
roi_end_w = min(Dtype(img_width - 1), roi_end_w);
roi_end_h = min(Dtype(img_height - 1), roi_end_h);
// Degenerate ROIs are forced to at least 1x1.
Dtype roi_height = max(roi_end_h - roi_start_h + 1, Dtype(1));
Dtype roi_width = max(roi_end_w - roi_start_w + 1, Dtype(1));
const Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
const Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
// Jump to the (image, channel) plane this thread reads from.
bottom_data += (roi_batch_ind * channels + c) * height * width;
float argmax_temp_data[4];
float w_temp_data[4];
float start_x = 0.25, start_y = 0.25;
if (interpolate_times == 1) {
start_x = 0.5;
start_y = 0.5;
}
Dtype dfValue = 0, maxValue = 0;
for (int inter_index = 0; inter_index < interpolate_times; ++inter_index) {
int index_x = inter_index / 2;
int index_y = inter_index % 2;
Dtype off_x = index_x * 0.5 + start_x;
Dtype off_y = index_y * 0.5 + start_y;
// Sample location inside the bin, clamped to the feature map.
Dtype hcenter = static_cast<Dtype>(ph + off_x)* bin_size_h;
Dtype wcenter = static_cast<Dtype>(pw + off_y)* bin_size_w;
hcenter = min(max(hcenter + roi_start_h, Dtype(0)), Dtype(height - 1));
wcenter = min(max(wcenter + roi_start_w, Dtype(0)), Dtype(width - 1));
// Integer corners of the 2x2 bilinear neighborhood.
int hstart = min(max(hcenter, Dtype(0)), Dtype(height - 1));
int wstart = min(max(wcenter, Dtype(0)), Dtype(width - 1));
int hend = min(max(hstart + 1, 0), height - 1);
int wend = min(max(wstart + 1, 0), width - 1);
Dtype fX0 = wcenter - wstart;
Dtype fX1 = wend - wcenter;
Dtype fY0 = hcenter - hstart;
Dtype fY1 = hend - hcenter;
Dtype fFactorA = fY1 * fX1;
Dtype fFactorB = fY1 * fX0;
Dtype fFactorC = fY0 * fX1;
Dtype fFactorD = fY0 * fX0;
dfValue = bottom_data[hstart * width + wstart] * fFactorA
+ bottom_data[hstart * width + wend] * fFactorB
+ bottom_data[hend * width + wstart] * fFactorC
+ bottom_data[hend * width + wend] * fFactorD;
if (inter_index == 0) {
maxValue = dfValue - 1; // seed below the first sample so it always wins
}
argmax_temp_data[0] = hstart * width + wstart;
argmax_temp_data[1] = hstart * width + wend;
argmax_temp_data[2] = hend * width + wstart;
argmax_temp_data[3] = hend * width + wend;
w_temp_data[0] = fFactorA;
w_temp_data[1] = fFactorB;
w_temp_data[2] = fFactorC;
w_temp_data[3] = fFactorD;
// Keep the best sample seen so far and record its indices/weights.
if (dfValue > maxValue || inter_index == 0) {
maxValue = dfValue;
top_data[index] = dfValue;
for (int s = 0; s < 4; ++s) {
w_data[4 * index + s] = w_temp_data[s];
argmax_data[4 * index + s] = argmax_temp_data[s];
}
}
}
}
}
// Forward pass for ROI pooling with bicubic interpolation.
// One thread per pooled output element (nthreads == N*C*pooled_h*pooled_w).
// Each thread samples `interpolate_times` (1 or 4) sub-bin centers, bicubically
// interpolates each one from a 4x4 feature-map neighborhood, keeps the largest
// sample, and records the 16 tap indices (argmax_data) and interpolation
// weights (w_data) needed by the backward pass.
// bottom_rois: one 5-tuple per ROI -- (batch_index, x1, y1, x2, y2).
template <typename Dtype>
__global__ void ROICubicForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype pad_ratio, const Dtype* bottom_rois, const int interpolate_times, Dtype* top_data, int* argmax_data, Dtype* w_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// NOTE(review): advancing the bottom_rois parameter itself persists into
// later grid-stride iterations of CUDA_KERNEL_LOOP; harmless only while
// the launch covers every index in a single pass -- confirm.
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
// padding: grow the ROI on each side by pad_ratio of its extent
Dtype pad_w, pad_h;
pad_w = (bottom_rois[3] - bottom_rois[1] + 1)*pad_ratio;
pad_h = (bottom_rois[4] - bottom_rois[2] + 1)*pad_ratio;
// map the padded ROI from image coordinates to feature-map coordinates
Dtype roi_start_w = (bottom_rois[1] - pad_w) * spatial_scale;
Dtype roi_start_h = (bottom_rois[2] - pad_h) * spatial_scale;
Dtype roi_end_w = (bottom_rois[3] + pad_w) * spatial_scale;
Dtype roi_end_h = (bottom_rois[4] + pad_h) * spatial_scale;
// clipping to the reconstructed image rectangle
roi_start_w = max(roi_start_w, Dtype(0)); roi_start_h = max(roi_start_h, Dtype(0));
int img_width = round(width / spatial_scale);
int img_height = round(height / spatial_scale);
roi_end_w = min(Dtype(img_width - 1), roi_end_w);
roi_end_h = min(Dtype(img_height - 1), roi_end_h);
// force malformed ROIs to span at least one feature-map cell
Dtype roi_height = max(roi_end_h - roi_start_h + 1, Dtype(1));
Dtype roi_width = max(roi_end_w - roi_start_w + 1, Dtype(1));
// size of one pooled bin, in feature-map coordinates
const Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
const Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
// jump to the (image, channel) plane this thread reads from
bottom_data += (roi_batch_ind * channels + c) * height * width;
// Per-sample scratch: 16 flat tap indices and their bicubic weights.
// NOTE(review): indices are staged in float and later stored to an int*;
// exact only while h*width+w < 2^24 -- confirm feature maps stay smaller.
float argmax_temp_data[16];
float w_temp_data[16];
// sub-bin sample offsets: 4 samples at (.25/.75)x(.25/.75), or one at (.5,.5)
float start_x = 0.25, start_y = 0.25;
if (interpolate_times == 1) {
start_x = 0.5;
start_y = 0.5;
}
Dtype dfCubicValue = 0, maxValue = 0;
for (int inter_index = 0; inter_index < interpolate_times; ++inter_index) {
int index_x = inter_index / 2;
int index_y = inter_index % 2;
Dtype off_x = index_x * 0.5 + start_x;
Dtype off_y = index_y * 0.5 + start_y;
// sample center inside the bin, clamped to the feature map
// (note: off_x feeds the h axis and off_y the w axis here)
Dtype hcenter = static_cast<Dtype>(ph + off_x)* bin_size_h;
Dtype wcenter = static_cast<Dtype>(pw + off_y)* bin_size_w;
hcenter = min(max(hcenter + roi_start_h, Dtype(0)), Dtype(height - 1));
wcenter = min(max(wcenter + roi_start_w, Dtype(0)), Dtype(width - 1));
// integer base cell of the sample (truncation; both centers are >= 0 here)
int i = wcenter;
int j = hcenter;
/*get adjacent 16 values*/
float values[4][4];
int temp_c, temp_r;
for (int r = j - 1, s = 0; r <= j + 2; r++, s++){
for (int c = i - 1, t = 0; c <= i + 2; c++, t++){
// clamp the 4x4 neighborhood to the map border (edge taps repeat);
// (original TODO comment here was garbled/mojibake)
temp_c = min(max(Dtype(c), Dtype(0)), Dtype(width - 1));
temp_r = min(max(Dtype(r), Dtype(0)), Dtype(height - 1));
values[s][t] = bottom_data[temp_r*width + temp_c];
argmax_temp_data[s * 4 + t] = temp_r*width + temp_c;
}
}
/*calc the coeff*/
// fractional position inside the base cell; A/C hold the per-axis weights
// from cubic_coeff_gpu (defined elsewhere) for distances (1, 0, -1, -2)
float u = wcenter - i;
float v = hcenter - j;
float A[4], C[4];
for (int distance = 1, s = 0; distance >= -2; distance--, s++){
A[s] = cubic_coeff_gpu(u + distance);
C[s] = cubic_coeff_gpu(v + distance);
}
// separable 4x4 weighted sum; keep the weights for the backward pass
dfCubicValue = 0;
for (int s = 0; s < 4; s++) {
for (int t = 0; t < 4; t++) {
dfCubicValue += values[s][t] * A[t] * C[s];
w_temp_data[s * 4 + t] = A[t] * C[s];
}
}
// keep the best sub-bin sample; the first sample always wins initially so
// top_data/argmax/w are written even when every sample is <= 0
if (dfCubicValue > maxValue || inter_index == 0) {
maxValue = dfCubicValue;
top_data[index] = dfCubicValue;
for (int s = 0; s < 16; ++s) {
w_data[16 * index + s] = w_temp_data[s];
argmax_data[16 * index + s] = argmax_temp_data[s];
}
}
}
}
}
// Launches the ROI-align forward kernel: one CUDA thread per pooled output
// element. Chooses the bicubic or bilinear kernel according to bi_type and
// fills top_data plus the argmax-index / weight buffers consumed by backward.
template <typename Dtype>
void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Device pointers: input feature map, ROIs, pooled output, and the
  // interpolation bookkeeping buffers.
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int* argmax_data = bili_idx.mutable_gpu_data();
  Dtype* w_data = bili_w.mutable_gpu_data();
  // Total number of pooled output elements (N * C * pooled_h * pooled_w).
  const int output_count = top[0]->count();
  // Multi-interpolation samples four sub-bin centers; otherwise just one.
  const int interpolate_times = is_multi_interpolate ? 4 : 1;
  // NOLINT_NEXT_LINE(whitespace/operators)
  if (bi_type == BiCubic) {
    ROICubicForward<Dtype><<<CAFFE_GET_BLOCKS(output_count), CAFFE_CUDA_NUM_THREADS>>>(
        output_count, bottom_data, spatial_scale_, channels_, height_, width_,
        pooled_height_, pooled_width_, pad_ratio_, bottom_rois,
        interpolate_times, top_data, argmax_data, w_data);
  } else {
    ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(output_count), CAFFE_CUDA_NUM_THREADS>>>(
        output_count, bottom_data, spatial_scale_, channels_, height_, width_,
        pooled_height_, pooled_width_, pad_ratio_, bottom_rois,
        interpolate_times, top_data, argmax_data, w_data);
  }
  CUDA_POST_KERNEL_CHECK;
}
// Atomically accumulates `val` into `*address`. The float overload maps to
// hardware atomicAdd; the double overload emulates it with an atomicCAS loop
// so the kernel below also compiles for architectures without native
// double-precision atomicAdd (pre-SM60).
static __device__ __forceinline__ float roi_align_atomic_add(float* address, float val) {
  return atomicAdd(address, val);
}
static __device__ __forceinline__ double roi_align_atomic_add(double* address, double val) {
  unsigned long long int* address_as_ull =
      reinterpret_cast<unsigned long long int*>(address);
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}

// Backward pass for ROI align/cubic pooling.
// One thread per pooled output element (nthreads == N*C*PH*PW). Each thread
// scatters its incoming gradient top_diff[index] onto the w_num feature-map
// taps recorded by the forward pass: argmax_data holds flat (h*width + w)
// indices and w_data the matching interpolation weights (w_num is 4 for
// bilinear, 16 for bicubic). bottom_diff must be zero-initialized by the
// caller (see Backward_gpu).
template <typename Dtype>
__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const Dtype* w_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const int w_num, const Dtype pad_ratio,
    Dtype* bottom_diff, const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) coords of this thread's pooled element.
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    // Use a local offset pointer: advancing the bottom_rois parameter itself
    // (as the original code did) corrupts subsequent grid-stride iterations
    // of CUDA_KERNEL_LOOP. Each ROI row is (batch_ind, x1, y1, x2, y2).
    const Dtype* offset_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_rois[0];
    for (int i = 0; i < w_num; ++i) {
      // Negative argmax entries mark unused taps.
      if (argmax_data[w_num * index + i] >= 0) {
        int offset_bottom = (roi_batch_ind * channels + c) * height
            * width + argmax_data[w_num * index + i];
        // Overlapping ROIs (and neighboring bins) can address the same bottom
        // element from many threads concurrently, so the accumulation must be
        // atomic; a plain "+=" loses updates nondeterministically.
        roi_align_atomic_add(bottom_diff + offset_bottom,
                             top_diff[index] * w_data[w_num * index + i]);
      }
    }
  }
}
// Launches the ROI-align backward kernel: clears the input gradient buffer,
// then scatters each pooled gradient back through the interpolation taps
// recorded during the forward pass.
template <typename Dtype>
void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Nothing to do when the feature-map input does not need gradients.
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  // Zero the gradient buffer first; the kernel accumulates into it.
  const int bottom_count = bottom[0]->count();
  caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
  const int* argmax_data = bili_idx.gpu_data();
  const Dtype* w_data = bili_w.gpu_data();
  const int top_count = top[0]->count();
  // Bicubic spreads each gradient over 16 taps, bilinear over 4.
  const int w_num = (bi_type == BiCubic) ? 16 : 4;
  // NOLINT_NEXT_LINE(whitespace/operators)
  ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(top_count), CAFFE_CUDA_NUM_THREADS>>>(
      top_count, top_diff, argmax_data, w_data, top[0]->num(), spatial_scale_,
      channels_, height_, width_, pooled_height_, pooled_width_, w_num,
      pad_ratio_, bottom_diff, bottom_rois);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer);
} // namespace caffe
|
025b6ae96606c0d0cb5cf39b9846ebd918717959.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <common/allocatorAdapter.hpp>
#include <raft/comms/comms.hpp>
#include <raft/cuda_utils.cuh>
#include <raft/handle.hpp>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
// TODO: replace these thrust code with cuda kernels or prims
// Writes to max_vals[idx] the signed entry with the largest absolute value of
// slice idx of `input`, an n_rows x n_cols column-major device matrix.
//   row_major == false: one result per column (m contiguous elements each);
//   row_major == true:  one result per row (n elements at stride m) --
//                       storage is still read column-major despite the name.
// Runs asynchronously as a thrust::for_each on `stream`; no sync performed.
// NOTE(review): in-place use (max_vals aliasing input) is relied upon by
// sign_flip_imp below, in the row_major form only, where thread idx touches
// just the positions congruent to idx modulo n_rows -- verify before calling
// in-place with row_major == false.
template <typename T>
void findMaxAbsOfColumns(
T* input, int n_rows, int n_cols, T* max_vals, hipStream_t stream, bool row_major = false)
{
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
auto n = n_cols;
auto execution_policy = rmm::exec_policy(stream);
if (row_major) {
// One device thread per row: scan that row's n entries at stride m.
thrust::for_each(execution_policy, counting, counting + n_rows, [=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx;
int end = d_i + (m * n);
for (int i = d_i; i < end; i = i + m) {
T val = input[i];
if (val < 0.0) { val = -val; }
if (val > max) {
max = val;
max_index = i;
}
}
// Store the signed value found at the max-|.| position.
max_vals[idx] = input[max_index];
});
} else {
// One device thread per column: scan the column's m contiguous entries.
thrust::for_each(execution_policy, counting, counting + n_cols, [=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx * m;
int end = d_i + m;
for (int i = d_i; i < end; i++) {
T val = input[i];
if (val < 0.0) { val = -val; }
if (val > max) {
max = val;
max_index = i;
}
}
max_vals[idx] = input[max_index];
});
}
}
// TODO: replace these thrust code with cuda kernels or prims
// Negates, in place, every column of the n_rows x n_cols column-major device
// matrix `input` whose reference entry max_vals[col] is negative (as used
// here, max_vals holds each column's max-magnitude entry, so after the flip
// that entry is positive). Asynchronous on `stream`; one thread per column.
template <typename T>
void flip(T* input, int n_rows, int n_cols, T* max_vals, hipStream_t stream)
{
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
thrust::for_each(rmm::exec_policy(stream), counting, counting + n_cols, [=] __device__(int idx) {
int d_i = idx * m;
int end = d_i + m;
if (max_vals[idx] < 0.0) {
for (int i = d_i; i < end; i++) {
input[i] = -input[i];
}
}
});
}
/**
* @brief sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen
* vectors
* @input param handle: the internal cuml handle object
* @input/output param input param input: input matrix that will be used to determine the sign.
* @input param input_desc: MNMG description of the input
* @input/output param components: components matrix.
* @input param n_components: number of columns of components matrix
* @input param streams: cuda streams
* @input param n_streams: number of streams
* @{
*/
// Templated core of the multi-GPU sign flip: makes the sign of each component
// (eigenvector column) deterministic by forcing its globally max-magnitude
// entry to be positive, across all local blocks and all ranks.
template <typename T>
void sign_flip_imp(raft::handle_t& handle,
                   std::vector<Matrix::Data<T>*>& input,
                   Matrix::PartDescriptor& input_desc,
                   T* components,
                   int n_components,
                   hipStream_t* streams,
                   int n_stream)
{
  int rank = handle.get_comms().get_rank();
  const auto& comm = handle.get_comms();
  // Blocks of the distributed input owned by this rank.
  std::vector<Matrix::RankSizePair*> local_blocks = input_desc.blocksOwnedBy(rank);
  // Scratch sized for whichever is larger: one group of n_components values
  // per local block, or per rank (the buffer is reused after the allgather).
  // Use std::max explicitly: the unqualified ::max left behind by hipify is
  // non-standard host code (the CUDA twin of this file uses std::max).
  rmm::device_uvector<T> max_vals(
    std::max(size_t(comm.get_size()), local_blocks.size()) * n_components, streams[0]);
  // Step 1: per local block, find each component column's max-magnitude entry
  // (sign preserved), round-robining the blocks over the available streams.
  for (std::size_t i = 0; i < input.size(); i++) {
    T* mv_loc = max_vals.data() + (i * n_components);
    findMaxAbsOfColumns(
      input[i]->ptr, local_blocks[i]->size, n_components, mv_loc, streams[i % n_stream]);
  }
  for (int i = 0; i < n_stream; i++) {
    CUDA_CHECK(hipStreamSynchronize(streams[i]));
  }
  // Step 2: reduce across the local blocks (row-wise, in place), then across
  // ranks via allgather followed by another row-wise reduction.
  findMaxAbsOfColumns(
    max_vals.data(), n_components, local_blocks.size(), max_vals.data(), streams[0], true);
  comm.allgather(max_vals.data(), max_vals.data(), n_components, streams[0]);
  comm.sync_stream(streams[0]);
  findMaxAbsOfColumns(
    max_vals.data(), n_components, comm.get_size(), max_vals.data(), streams[0], true);
  // Step 3: flip every component column whose global reference entry is
  // negative, in both the distributed input blocks and `components`.
  for (std::size_t i = 0; i < local_blocks.size(); i++) {
    flip(
      input[i]->ptr, local_blocks[i]->size, n_components, max_vals.data(), streams[i % n_stream]);
  }
  for (int i = 0; i < n_stream; i++) {
    CUDA_CHECK(hipStreamSynchronize(streams[i]));
  }
  flip(components, input_desc.N, n_components, max_vals.data(), streams[0]);
}
// Single-precision entry point: forwards to the templated implementation.
void sign_flip(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
float* components,
int n_components,
hipStream_t* streams,
int n_stream)
{
sign_flip_imp(handle, input_data, input_desc, components, n_components, streams, n_stream);
}
// Double-precision entry point: forwards to the templated implementation.
void sign_flip(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
double* components,
int n_components,
hipStream_t* streams,
int n_stream)
{
sign_flip_imp(handle, input_data, input_desc, components, n_components, streams, n_stream);
}
} // namespace opg
} // namespace PCA
} // namespace ML
| 025b6ae96606c0d0cb5cf39b9846ebd918717959.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <cuml/decomposition/sign_flip_mg.hpp>
#include <common/allocatorAdapter.hpp>
#include <raft/comms/comms.hpp>
#include <raft/cuda_utils.cuh>
#include <raft/handle.hpp>
#include <raft/matrix/math.cuh>
#include <raft/matrix/matrix.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <cstddef>
using namespace MLCommon;
namespace ML {
namespace PCA {
namespace opg {
// TODO: replace these thrust code with cuda kernels or prims
// Writes to max_vals[idx] the signed entry with the largest absolute value of
// slice idx of `input`, an n_rows x n_cols column-major device matrix.
//   row_major == false: one result per column (m contiguous elements each);
//   row_major == true:  one result per row (n elements at stride m) --
//                       storage is still read column-major despite the name.
// Runs asynchronously as a thrust::for_each on `stream`; no sync performed.
// NOTE(review): in-place use (max_vals aliasing input) is relied upon by
// sign_flip_imp below, in the row_major form only, where thread idx touches
// just the positions congruent to idx modulo n_rows -- verify before calling
// in-place with row_major == false.
template <typename T>
void findMaxAbsOfColumns(
T* input, int n_rows, int n_cols, T* max_vals, cudaStream_t stream, bool row_major = false)
{
auto counting = thrust::make_counting_iterator(0);
auto m = n_rows;
auto n = n_cols;
auto execution_policy = rmm::exec_policy(stream);
if (row_major) {
// One device thread per row: scan that row's n entries at stride m.
thrust::for_each(execution_policy, counting, counting + n_rows, [=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx;
int end = d_i + (m * n);
for (int i = d_i; i < end; i = i + m) {
T val = input[i];
if (val < 0.0) { val = -val; }
if (val > max) {
max = val;
max_index = i;
}
}
// Store the signed value found at the max-|.| position.
max_vals[idx] = input[max_index];
});
} else {
// One device thread per column: scan the column's m contiguous entries.
thrust::for_each(execution_policy, counting, counting + n_cols, [=] __device__(int idx) {
T max = 0.0;
int max_index = 0;
int d_i = idx * m;
int end = d_i + m;
for (int i = d_i; i < end; i++) {
T val = input[i];
if (val < 0.0) { val = -val; }
if (val > max) {
max = val;
max_index = i;
}
}
max_vals[idx] = input[max_index];
});
}
}
// TODO: replace these thrust code with cuda kernels or prims
// Negates, in place, every column of the n_rows x n_cols column-major device
// matrix `input` whose reference entry max_vals[col] is negative (as used
// here, max_vals holds each column's max-magnitude entry, so after the flip
// that entry is positive). Asynchronous on `stream`; one thread per column.
template <typename T>
void flip(T* input, int n_rows, int n_cols, T* max_vals, cudaStream_t stream)
{
  const int rows = n_rows;
  auto first = thrust::make_counting_iterator(0);
  auto last  = first + n_cols;
  thrust::for_each(rmm::exec_policy(stream), first, last, [=] __device__(int col) {
    if (max_vals[col] < 0.0) {
      const int begin = col * rows;
      for (int k = begin; k < begin + rows; k++) {
        input[k] = -input[k];
      }
    }
  });
}
/**
* @brief sign flip for PCA and tSVD. This is used to stabilize the sign of column major eigen
* vectors
* @input param handle: the internal cuml handle object
* @input/output param input param input: input matrix that will be used to determine the sign.
* @input param input_desc: MNMG description of the input
* @input/output param components: components matrix.
* @input param n_components: number of columns of components matrix
* @input param streams: cuda streams
* @input param n_streams: number of streams
* @{
*/
// Templated core of the multi-GPU sign flip: makes the sign of each component
// (eigenvector column) deterministic by forcing its globally max-magnitude
// entry to be positive, across all local blocks and all ranks.
template <typename T>
void sign_flip_imp(raft::handle_t& handle,
std::vector<Matrix::Data<T>*>& input,
Matrix::PartDescriptor& input_desc,
T* components,
int n_components,
cudaStream_t* streams,
int n_stream)
{
int rank = handle.get_comms().get_rank();
const auto& comm = handle.get_comms();
// Blocks of the distributed input owned by this rank.
std::vector<Matrix::RankSizePair*> local_blocks = input_desc.blocksOwnedBy(rank);
// Scratch sized for whichever is larger: one group of n_components values
// per local block, or per rank (the buffer is reused after the allgather).
rmm::device_uvector<T> max_vals(
std::max(size_t(comm.get_size()), local_blocks.size()) * n_components, streams[0]);
// Step 1: per local block, find each component column's max-magnitude entry
// (sign preserved), round-robining the blocks over the available streams.
for (std::size_t i = 0; i < input.size(); i++) {
T* mv_loc = max_vals.data() + (i * n_components);
findMaxAbsOfColumns(
input[i]->ptr, local_blocks[i]->size, n_components, mv_loc, streams[i % n_stream]);
}
for (int i = 0; i < n_stream; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
// Step 2: reduce across the local blocks (row-wise, in place), then across
// ranks via allgather followed by another row-wise reduction.
findMaxAbsOfColumns(
max_vals.data(), n_components, local_blocks.size(), max_vals.data(), streams[0], true);
comm.allgather(max_vals.data(), max_vals.data(), n_components, streams[0]);
comm.sync_stream(streams[0]);
findMaxAbsOfColumns(
max_vals.data(), n_components, comm.get_size(), max_vals.data(), streams[0], true);
// Step 3: flip every component column whose global reference entry is
// negative, in both the distributed input blocks and `components`.
for (std::size_t i = 0; i < local_blocks.size(); i++) {
flip(
input[i]->ptr, local_blocks[i]->size, n_components, max_vals.data(), streams[i % n_stream]);
}
for (int i = 0; i < n_stream; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
flip(components, input_desc.N, n_components, max_vals.data(), streams[0]);
}
// Single-precision entry point: forwards to the templated implementation.
void sign_flip(raft::handle_t& handle,
std::vector<Matrix::Data<float>*>& input_data,
Matrix::PartDescriptor& input_desc,
float* components,
int n_components,
cudaStream_t* streams,
int n_stream)
{
sign_flip_imp(handle, input_data, input_desc, components, n_components, streams, n_stream);
}
// Double-precision entry point: forwards to the templated implementation.
void sign_flip(raft::handle_t& handle,
std::vector<Matrix::Data<double>*>& input_data,
Matrix::PartDescriptor& input_desc,
double* components,
int n_components,
cudaStream_t* streams,
int n_stream)
{
sign_flip_imp(handle, input_data, input_desc, components, n_components, streams, n_stream);
}
} // namespace opg
} // namespace PCA
} // namespace ML
|
d5d097070aab621982e3862bd3ceb28df3a002b4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
renderbox2 - a physically based gpu renderer for research purposes
Copyright (C) - 2014 - Srinath Ravichandran
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//
// Note: This kernel has been modified to better suit the architecture of renderbox2
//
// Application specific headers.
#include <accelerators/sbvh/cudatracerkernels.h>
// Cuda specific headers.
#include <hip/hip_runtime.h>
// Standard c++ headers.
#ifdef USE_KERNEL_FERMI
#define STACK_SIZE 64
// Ray-vs-BVH traversal kernel in the two-level "while-while" style of
// Aila & Laine's GPU ray-traversal kernels (see the NVIDIA notice above).
// One thread traces one ray: rays[] packs two float4 per ray, (origin, tmin)
// and (direction, tmax). BVH nodes are addressed by byte offsets into
// `nodes`; a negative node address encodes a leaf whose first triangle
// record lives at ~addr in the t_trisA texture (Woop unit-triangle format,
// 3 float4 per triangle, terminated by a negative-zero marker). The hit is
// emitted through the STORE_RESULT macro.
// NOTE(review): relies on the pre-Volta warp vote __any() and implicit warp
// synchrony; acceptable for the Fermi path guarded by USE_KERNEL_FERMI, but
// would need __any_sync on modern architectures.
KERNEL_FERMI_TRACE
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Initialize.
{
// Pick ray index.
rayidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y));
if (rayidx >= numRays)
return;
// Fetch ray.
float4 o = rays[rayidx * 2 + 0];
float4 d = rays[rayidx * 2 + 1];
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
// Replace near-zero direction components by a tiny signed epsilon so the
// reciprocals below stay finite.
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
// Setup traversal. Stack entries are 4-byte ints but stackPtr arithmetic
// is done in bytes (hence the +/- 4 adjustments below).
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodes + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes (slab test using the
// precomputed reciprocal direction and origin/direction terms).
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
// Triangle records are 3 float4s; ~leafAddr is the first record index.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if (nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
STORE_RESULT(rayidx, hitIndex, hitT);
}
#else
// If compiled with cuda > 2 architecture, we absolutely do nothing.
// Calling code will have to report this.
// Fallback when USE_KERNEL_FERMI is not defined: compile the entry point as a
// no-op so the translation unit still builds; per the note above, calling
// code is expected to detect this configuration and report it.
KERNEL_FERMI_TRACE
{
// Empty kernel call.
}
#endif
| d5d097070aab621982e3862bd3ceb28df3a002b4.cu |
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
renderbox2 - a physically based gpu renderer for research purposes
Copyright (C) - 2014 - Srinath Ravichandran
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//
// Note: This kernel has been modified to better suit the architecture of renderbox2
//
// Application specific headers.
#include <accelerators/sbvh/cudatracerkernels.h>
// Cuda specific headers.
#include <cuda_runtime.h>
// Standard c++ headers.
#ifdef USE_KERNEL_FERMI
#define STACK_SIZE 64
KERNEL_FERMI_TRACE
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Initialize.
{
// Pick ray index.
rayidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y));
if (rayidx >= numRays)
return;
// Fetch ray.
float4 o = rays[rayidx * 2 + 0];
float4 d = rays[rayidx * 2 + 1];
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodes + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if (nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
STORE_RESULT(rayidx, hitIndex, hitT);
}
#else
// If compiled with cuda > 2 architecture, we absolutely do nothing.
// Calling code will have to report this.
KERNEL_FERMI_TRACE
{
// Empty kernel call.
}
#endif
|
1ffaed24299864a007165edcb3a26e29cebd4bf4.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <DllLoader.h>
#include <CudaMathEngine.h>
#include <CudaCommon.h>
#include <CublasFunctions.h>
#include <CusparseFunctions.h>
#include <CudaDevice.h>
#include <CudaAssert.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <MathEngineDeviceStackAllocator.h>
#include <MathEngineHostStackAllocator.h>
#include <math.h>
#include <float.h>
#include <hip/hip_runtime.h>
namespace NeoML {
static __constant__ const float ZeroDev = 0;
static __constant__ const float OneDev = 1;
const float* CCudaConst::Zero;
const float* CCudaConst::One;
const int CudaMemoryAlignment = 4;
//------------------------------------------------------------------------------------------------------------
// Constructs a CUDA (HIP) math engine bound to the given device.
// Takes ownership of _device; binds the calling thread to that device, then
// creates the stream, the cuBLAS/cuSPARSE handles, resolves device addresses
// of the 0/1 constants, and finally sets up the memory pool and stack
// allocators. _cusparse/_cublas are borrowed function tables (not owned).
CCudaMathEngine::CCudaMathEngine( const CCusparse* _cusparse, const CCublas* _cublas, std::unique_ptr<CCudaDevice>& _device ) :
	cusparse( _cusparse ),
	cublas( _cublas ),
	cudaStream( 0 ),
	cublasHandle( 0 ),
	cusparseHandle( 0 )
{
	device.swap( _device ); // take ownership of the device descriptor
	// CUDA
	ASSERT_EXPR( device != 0 );
	ASSERT_CUDA( hipSetDevice( device->DeviceNumber ) );
	// CUDA stream.
	ASSERT_CUDA( hipStreamCreate( &cudaStream ) );
	// Cublas.
	ASSERT_CUBLAS( cublas->Create( &cublasHandle ) );
	ASSERT_CUBLAS( cublas->SetAtomicsMode( cublasHandle, HIPBLAS_ATOMICS_ALLOWED ) );
	// Scalars (alpha/beta) are read from device memory, so BLAS calls need no host-side sync
	ASSERT_CUBLAS( cublas->SetPointerMode( cublasHandle, HIPBLAS_POINTER_MODE_DEVICE ) );
	ASSERT_CUBLAS( cublas->SetStream( cublasHandle, cudaStream ) );
	// Cusparse.
	ASSERT_CUSPARSE( cusparse->Create( &cusparseHandle ) );
	ASSERT_CUSPARSE( cusparse->SetStream( cusparseHandle, cudaStream ) );
	// Constants
	// Resolve device-side addresses of the __constant__ 0.f / 1.f values declared above
	ASSERT_CUDA( hipGetSymbolAddress((void**)&CCudaConst::Zero, ZeroDev) );
	ASSERT_CUDA( hipGetSymbolAddress((void**)&CCudaConst::One, OneDev) );
	memoryPool = std::unique_ptr<CMemoryPool>( new CMemoryPool( device->MemoryLimit, this, true ) );
	deviceStackRunTime = std::unique_ptr<CDeviceStackAllocator>( new CDeviceStackAllocator( *memoryPool, CudaMemoryAlignment ) );
	hostStackRunTime = std::unique_ptr<CHostStackAllocator>( new CHostStackAllocator( CudaMemoryAlignment ) );
	CDllLoader::Load(CDllLoader::CUDA_DLL);
}
// Releases resources in reverse order of construction: the stack allocators
// and memory pool first (they may still hold device memory), then the stream
// and the library handles, and finally the CUDA DLL reference count.
CCudaMathEngine::~CCudaMathEngine()
{
	hostStackRunTime.reset();
	deviceStackRunTime.reset();
	memoryPool.reset();
	hipStreamDestroy( cudaStream );
	cusparse->Destroy( cusparseHandle );
	cublas->Destroy( cublasHandle );
	CDllLoader::Free(CDllLoader::CUDA_DLL);
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// Clamps a requested minimum block dimension and the hardware geometry limit:
// on return minVal does not exceed either maxVal (the task size along this
// axis) or geom (the hardware limit), and geom itself does not exceed maxVal.
static inline void CudaFixGeom(int& minVal, int maxVal, unsigned int& geom)
{
	const int hardwareLimit = static_cast<int>( geom );
	minVal = ( minVal > maxVal ) ? maxVal : minVal;
	minVal = ( minVal > hardwareLimit ) ? hardwareLimit : minVal;
	if( hardwareLimit > maxVal ) {
		geom = maxVal;
	}
}
// Returns the largest power of two strictly less than value (returns 1 for
// value <= 2). Implemented as a binary search over the exponent, starting
// from 2^16 and adjusting by halving exponent steps.
static inline int GetMax2ExpLess(int value)
{
	int result = 1 << 16;
	for( int step = 8; step > 0; step >>= 1 ) {
		if( result >= value ) {
			result >>= step;
		} else {
			result <<= step;
		}
	}
	// The search may land exactly on a power of two >= value; step below it
	if( result >= value ) {
		result >>= 1;
	}
	return result;
}
// Shrinks the requested minimum block dimensions until their product fits
// into maxThreadCount. Dimensions are reduced in round-robin order
// (Z, then Y, then X), each time down to a smaller power of two.
static inline void CudaFixMinVals(int& minZ, int& minY, int& minX, int maxThreadCount)
{
	int* const dims[3] = { &minZ, &minY, &minX };
	int turn = 0;
	while( minX * minY * minZ > maxThreadCount ) {
		int& dim = *dims[turn % 3];
		dim = GetMax2ExpLess( dim );
		++turn;
	}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Rounds xSize up to a form usable by warp-based reductions: a multiple of
// the warp size when xSize is at least one warp, otherwise the smallest
// power of two (not larger than the warp size) that still covers xSize.
int CCudaMathEngine::alignXSizeForWarp(int xSize)
{
	const int warpSize = device->WarpSize;
	if( xSize >= warpSize ) {
		// Round up to the next multiple of the warp size
		return ( ( xSize + warpSize - 1 ) / warpSize ) * warpSize;
	}
	// Halve the warp size while the next smaller power of two still covers xSize
	int result = warpSize;
	while( xSize <= ( result >> 1 ) ) {
		result >>= 1;
	}
	return result;
}
// Calculates a 1D launch configuration: each thread handles up to
// combineCount tasks, threads per block are capped by the device limit,
// and enough blocks are launched to cover all taskCount tasks.
void CCudaMathEngine::getCudaTaskGrid(int& blockCount, int& threadCount, int taskCount, int combineCount)
{
	ASSERT_EXPR( taskCount > 0 );
	ASSERT_EXPR( combineCount > 0 );
	// Number of threads actually needed after combining
	const int runCount = ( taskCount + combineCount - 1 ) / combineCount;
	const int maxThreads = static_cast<int>( device->ThreadMaxCount );
	threadCount = ( runCount < maxThreads ) ? runCount : maxThreads;
	blockCount = ( runCount + threadCount - 1 ) / threadCount;
}
// 2D grid: a special case of the 3D grid with batch size 1 and no minimums.
void CCudaMathEngine::getCudaTaskGrid2D(dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, 1, height, width, maxThreadCount);
}

// 3D grid with no constraints on the minimum block dimensions.
void CCudaMathEngine::getCudaTaskGrid3D(dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, batchSize, height, width, maxThreadCount);
}

// 2D grid honoring minimum Y/X block dimensions (batch size 1, no Z constraint).
void CCudaMathEngine::getCudaTaskGrid2DMinYX(int minY, int minX, dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, minY, minX, blockCount, threadCount, 1, height, width, maxThreadCount);
}
// Calculates a 3D launch configuration (blockCount x threadCount) for a
// batchSize x height x width task, honoring the requested minimum block
// dimensions (minZ/minY/minX) and the per-block thread limit.
// Enumerates power-of-two block shapes and keeps the one needing the fewest blocks.
void CCudaMathEngine::getCudaTaskGrid3DMinZYX(int minZ, int minY, int minX, dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int _maxThreadCount)
{
	int maxThreadCount = min( device->ThreadMaxCount, static_cast<unsigned int>( _maxThreadCount ) );
	ASSERT_EXPR(maxThreadCount >= 1);
	ASSERT_EXPR(minZ > 0 && minY > 0 && minX > 0);
	ASSERT_EXPR(batchSize > 0 && height > 0 && width > 0);
	// Hardware limits on each block dimension
	dim3 geom( device->ThreadMax3DCountX, device->ThreadMax3DCountY, device->ThreadMax3DCountZ );
	// Clamp the requested minimums to the task size and hardware limits,
	// then shrink them until their product fits into one block
	CudaFixGeom(minX, width, geom.x);
	CudaFixGeom(minY, height, geom.y);
	CudaFixGeom(minZ, batchSize, geom.z);
	CudaFixMinVals(minX, minY, minZ, maxThreadCount);
	unsigned int optimalGridSize = INT_MAX;
	// Fallback configuration: one thread per block
	threadCount = dim3(1, 1, 1);
	blockCount = dim3(width, height, batchSize);
	dim3 currentGeom;
	// Try power-of-two Z block sizes from minZ up to the hardware/thread limits
	unsigned int zLimit = min(geom.z * 2, maxThreadCount + 1);
	for(currentGeom.z = minZ; currentGeom.z < zLimit; currentGeom.z *= 2) {
		unsigned int zBlock = min(currentGeom.z, geom.z);
		unsigned int zBlockCount = (batchSize + zBlock - 1) / zBlock;
		// Threads remaining for the XY plane once Z is fixed
		unsigned int xyMaxThreadCount = maxThreadCount / currentGeom.z;
		unsigned int yLimit = min(geom.y * 2, xyMaxThreadCount + 1);
		for(currentGeom.y = minY; currentGeom.y < yLimit; currentGeom.y *= 2) {
			// Give X all remaining threads; skip shapes that violate minX
			currentGeom.x = xyMaxThreadCount / currentGeom.y;
			if((int)currentGeom.x < minX) {
				continue;
			}
			unsigned int yBlock = min(currentGeom.y, geom.y);
			unsigned int yBlockCount = (height + yBlock - 1) / yBlock;
			unsigned int xBlock = min(currentGeom.x, geom.x);
			unsigned int xBlockCount = (width + xBlock - 1) / xBlock;
			unsigned int gridSize = xBlockCount * yBlockCount * zBlockCount;
			// Keep the configuration with the smallest grid (fewest blocks)
			if(gridSize < optimalGridSize) {
				optimalGridSize = gridSize;
				threadCount = dim3(xBlock, yBlock, zBlock);
				blockCount = dim3(xBlockCount, yBlockCount, zBlockCount);
			}
		}
	}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
| 1ffaed24299864a007165edcb3a26e29cebd4bf4.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <DllLoader.h>
#include <CudaMathEngine.h>
#include <CudaCommon.h>
#include <CublasFunctions.h>
#include <CusparseFunctions.h>
#include <CudaDevice.h>
#include <CudaAssert.h>
#include <MathEngineCommon.h>
#include <MemoryHandleInternal.h>
#include <MathEngineDeviceStackAllocator.h>
#include <MathEngineHostStackAllocator.h>
#include <math.h>
#include <float.h>
#include <cuda_runtime.h>
namespace NeoML {
static __constant__ const float ZeroDev = 0;
static __constant__ const float OneDev = 1;
const float* CCudaConst::Zero;
const float* CCudaConst::One;
const int CudaMemoryAlignment = 4;
//------------------------------------------------------------------------------------------------------------
// Constructs a CUDA math engine bound to the given device.
// Takes ownership of _device; binds the calling thread to that device, then
// creates the stream, the cuBLAS/cuSPARSE handles, resolves device addresses
// of the 0/1 constants, and finally sets up the memory pool and stack
// allocators. _cusparse/_cublas are borrowed function tables (not owned).
CCudaMathEngine::CCudaMathEngine( const CCusparse* _cusparse, const CCublas* _cublas, std::unique_ptr<CCudaDevice>& _device ) :
	cusparse( _cusparse ),
	cublas( _cublas ),
	cudaStream( 0 ),
	cublasHandle( 0 ),
	cusparseHandle( 0 )
{
	device.swap( _device ); // take ownership of the device descriptor
	// CUDA
	ASSERT_EXPR( device != 0 );
	ASSERT_CUDA( cudaSetDevice( device->DeviceNumber ) );
	// CUDA stream.
	ASSERT_CUDA( cudaStreamCreate( &cudaStream ) );
	// Cublas.
	ASSERT_CUBLAS( cublas->Create( &cublasHandle ) );
	ASSERT_CUBLAS( cublas->SetAtomicsMode( cublasHandle, CUBLAS_ATOMICS_ALLOWED ) );
	// Scalars (alpha/beta) are read from device memory, so BLAS calls need no host-side sync
	ASSERT_CUBLAS( cublas->SetPointerMode( cublasHandle, CUBLAS_POINTER_MODE_DEVICE ) );
	ASSERT_CUBLAS( cublas->SetStream( cublasHandle, cudaStream ) );
	// Cusparse.
	ASSERT_CUSPARSE( cusparse->Create( &cusparseHandle ) );
	ASSERT_CUSPARSE( cusparse->SetStream( cusparseHandle, cudaStream ) );
	// Constants
	// Resolve device-side addresses of the __constant__ 0.f / 1.f values declared above
	ASSERT_CUDA( cudaGetSymbolAddress((void**)&CCudaConst::Zero, ZeroDev) );
	ASSERT_CUDA( cudaGetSymbolAddress((void**)&CCudaConst::One, OneDev) );
	memoryPool = std::unique_ptr<CMemoryPool>( new CMemoryPool( device->MemoryLimit, this, true ) );
	deviceStackRunTime = std::unique_ptr<CDeviceStackAllocator>( new CDeviceStackAllocator( *memoryPool, CudaMemoryAlignment ) );
	hostStackRunTime = std::unique_ptr<CHostStackAllocator>( new CHostStackAllocator( CudaMemoryAlignment ) );
	CDllLoader::Load(CDllLoader::CUDA_DLL);
}
// Releases resources in reverse order of construction: the stack allocators
// and memory pool first (they may still hold device memory), then the stream
// and the library handles, and finally the CUDA DLL reference count.
CCudaMathEngine::~CCudaMathEngine()
{
	hostStackRunTime.reset();
	deviceStackRunTime.reset();
	memoryPool.reset();
	cudaStreamDestroy( cudaStream );
	cusparse->Destroy( cusparseHandle );
	cublas->Destroy( cublasHandle );
	CDllLoader::Free(CDllLoader::CUDA_DLL);
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// Clamps a requested minimum block dimension and the hardware geometry limit:
// on return minVal does not exceed either maxVal (the task size along this
// axis) or geom (the hardware limit), and geom itself does not exceed maxVal.
static inline void CudaFixGeom(int& minVal, int maxVal, unsigned int& geom)
{
	const int hardwareLimit = static_cast<int>( geom );
	minVal = ( minVal > maxVal ) ? maxVal : minVal;
	minVal = ( minVal > hardwareLimit ) ? hardwareLimit : minVal;
	if( hardwareLimit > maxVal ) {
		geom = maxVal;
	}
}
// Returns the largest power of two strictly less than value (returns 1 for
// value <= 2). Implemented as a binary search over the exponent, starting
// from 2^16 and adjusting by halving exponent steps.
static inline int GetMax2ExpLess(int value)
{
	int result = 1 << 16;
	for( int step = 8; step > 0; step >>= 1 ) {
		if( result >= value ) {
			result >>= step;
		} else {
			result <<= step;
		}
	}
	// The search may land exactly on a power of two >= value; step below it
	if( result >= value ) {
		result >>= 1;
	}
	return result;
}
// Shrinks the requested minimum block dimensions until their product fits
// into maxThreadCount. Dimensions are reduced in round-robin order
// (Z, then Y, then X), each time down to a smaller power of two.
static inline void CudaFixMinVals(int& minZ, int& minY, int& minX, int maxThreadCount)
{
	int* const dims[3] = { &minZ, &minY, &minX };
	int turn = 0;
	while( minX * minY * minZ > maxThreadCount ) {
		int& dim = *dims[turn % 3];
		dim = GetMax2ExpLess( dim );
		++turn;
	}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Rounds xSize up to a form usable by warp-based reductions: a multiple of
// the warp size when xSize is at least one warp, otherwise the smallest
// power of two (not larger than the warp size) that still covers xSize.
int CCudaMathEngine::alignXSizeForWarp(int xSize)
{
	const int warpSize = device->WarpSize;
	if( xSize >= warpSize ) {
		// Round up to the next multiple of the warp size
		return ( ( xSize + warpSize - 1 ) / warpSize ) * warpSize;
	}
	// Halve the warp size while the next smaller power of two still covers xSize
	int result = warpSize;
	while( xSize <= ( result >> 1 ) ) {
		result >>= 1;
	}
	return result;
}
// Calculates a 1D launch configuration: each thread handles up to
// combineCount tasks, threads per block are capped by the device limit,
// and enough blocks are launched to cover all taskCount tasks.
void CCudaMathEngine::getCudaTaskGrid(int& blockCount, int& threadCount, int taskCount, int combineCount)
{
	ASSERT_EXPR( taskCount > 0 );
	ASSERT_EXPR( combineCount > 0 );
	// Number of threads actually needed after combining
	const int runCount = ( taskCount + combineCount - 1 ) / combineCount;
	const int maxThreads = static_cast<int>( device->ThreadMaxCount );
	threadCount = ( runCount < maxThreads ) ? runCount : maxThreads;
	blockCount = ( runCount + threadCount - 1 ) / threadCount;
}
// 2D grid: a special case of the 3D grid with batch size 1 and no minimums.
void CCudaMathEngine::getCudaTaskGrid2D(dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, 1, height, width, maxThreadCount);
}

// 3D grid with no constraints on the minimum block dimensions.
void CCudaMathEngine::getCudaTaskGrid3D(dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, 1, 1, blockCount, threadCount, batchSize, height, width, maxThreadCount);
}

// 2D grid honoring minimum Y/X block dimensions (batch size 1, no Z constraint).
void CCudaMathEngine::getCudaTaskGrid2DMinYX(int minY, int minX, dim3& blockCount, dim3& threadCount,
	int height, int width, int maxThreadCount)
{
	getCudaTaskGrid3DMinZYX(1, minY, minX, blockCount, threadCount, 1, height, width, maxThreadCount);
}
// Calculates a 3D launch configuration (blockCount x threadCount) for a
// batchSize x height x width task, honoring the requested minimum block
// dimensions (minZ/minY/minX) and the per-block thread limit.
// Enumerates power-of-two block shapes and keeps the one needing the fewest blocks.
void CCudaMathEngine::getCudaTaskGrid3DMinZYX(int minZ, int minY, int minX, dim3& blockCount, dim3& threadCount,
	int batchSize, int height, int width, int _maxThreadCount)
{
	int maxThreadCount = min( device->ThreadMaxCount, static_cast<unsigned int>( _maxThreadCount ) );
	ASSERT_EXPR(maxThreadCount >= 1);
	ASSERT_EXPR(minZ > 0 && minY > 0 && minX > 0);
	ASSERT_EXPR(batchSize > 0 && height > 0 && width > 0);
	// Hardware limits on each block dimension
	dim3 geom( device->ThreadMax3DCountX, device->ThreadMax3DCountY, device->ThreadMax3DCountZ );
	// Clamp the requested minimums to the task size and hardware limits,
	// then shrink them until their product fits into one block
	CudaFixGeom(minX, width, geom.x);
	CudaFixGeom(minY, height, geom.y);
	CudaFixGeom(minZ, batchSize, geom.z);
	CudaFixMinVals(minX, minY, minZ, maxThreadCount);
	unsigned int optimalGridSize = INT_MAX;
	// Fallback configuration: one thread per block
	threadCount = dim3(1, 1, 1);
	blockCount = dim3(width, height, batchSize);
	dim3 currentGeom;
	// Try power-of-two Z block sizes from minZ up to the hardware/thread limits
	unsigned int zLimit = min(geom.z * 2, maxThreadCount + 1);
	for(currentGeom.z = minZ; currentGeom.z < zLimit; currentGeom.z *= 2) {
		unsigned int zBlock = min(currentGeom.z, geom.z);
		unsigned int zBlockCount = (batchSize + zBlock - 1) / zBlock;
		// Threads remaining for the XY plane once Z is fixed
		unsigned int xyMaxThreadCount = maxThreadCount / currentGeom.z;
		unsigned int yLimit = min(geom.y * 2, xyMaxThreadCount + 1);
		for(currentGeom.y = minY; currentGeom.y < yLimit; currentGeom.y *= 2) {
			// Give X all remaining threads; skip shapes that violate minX
			currentGeom.x = xyMaxThreadCount / currentGeom.y;
			if((int)currentGeom.x < minX) {
				continue;
			}
			unsigned int yBlock = min(currentGeom.y, geom.y);
			unsigned int yBlockCount = (height + yBlock - 1) / yBlock;
			unsigned int xBlock = min(currentGeom.x, geom.x);
			unsigned int xBlockCount = (width + xBlock - 1) / xBlock;
			unsigned int gridSize = xBlockCount * yBlockCount * zBlockCount;
			// Keep the configuration with the smallest grid (fewest blocks)
			if(gridSize < optimalGridSize) {
				optimalGridSize = gridSize;
				threadCount = dim3(xBlock, yBlock, zBlock);
				blockCount = dim3(xBlockCount, yBlockCount, zBlockCount);
			}
		}
	}
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
b33741ce374078adc7399986efdf561e193a8c13.hip | // !!! This is a file automatically generated by hipify!!!
// modified from
// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu
/* Copyright 2020 The Microsoft DeepSpeed Team
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
Licensed under the MIT License.
*/
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "multi_tensor_apply.cuh"
#include "type_shim.h"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum {
ADAM_MODE_0 = 0, // L2 regularization mode
ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
using MATH_T = float;
// Fused Adam update functor applied per chunk by multi_tensor_apply.
// Tensor list layout: addresses[0] = gradients (T_g), addresses[1] = params,
// addresses[2] = exp_avg (m), addresses[3] = exp_avg_sq (v), all of type T_p.
// Params and both moments are updated in place; math is done in MATH_T (float).
template <typename T_g, typename T_p>
struct AdamFunctor {
  // chunk_size: elements per chunk; tl: tensor/chunk metadata for this block;
  // beta1/beta2: moment decay rates; beta1_correction/beta2_correction:
  // bias-correction denominators (1 - beta^step, or 1.0 when disabled);
  // mode: ADAM_MODE_0 = L2 regularization, ADAM_MODE_1 = decoupled weight
  // decay (AdamW); div_scale: gradient unscale divisor, ignored unless > 0.
  __device__ __forceinline__ void operator()(
      int chunk_size, volatile int *noop_gmem, TensorListMetadata<4> &tl,
      const float beta1, const float beta2, const float beta1_correction,
      const float beta2_correction, const float epsilon, const float lr,
      adamMode_t mode, const float decay, const float div_scale) {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    // Locate the tensor and chunk assigned to this thread block.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];

    // potentially use to pass in list of scalar
    // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    T_g *g = (T_g *)tl.addresses[0][tensor_loc];
    g += chunk_idx * chunk_size;

    T_p *p = (T_p *)tl.addresses[1][tensor_loc];
    p += chunk_idx * chunk_size;

    T_p *m = (T_p *)tl.addresses[2][tensor_loc];
    m += chunk_idx * chunk_size;

    T_p *v = (T_p *)tl.addresses[3][tensor_loc];
    v += chunk_idx * chunk_size;

    // Elements remaining from this chunk's start (may be < chunk_size at the tail).
    n -= chunk_idx * chunk_size;

    // see note in multi_tensor_scale_kernel.cu
    // Each iteration handles blockDim.x * ILP elements: load ILP values per
    // thread into registers, update them, then write them back.
    for (int i_start = 0; i_start < n && i_start < chunk_size;
         i_start += blockDim.x * ILP) {
      MATH_T r_g[ILP];
      MATH_T r_p[ILP];
      MATH_T r_m[ILP];
      MATH_T r_v[ILP];
#pragma unroll
      for (int ii = 0; ii < ILP; ii++) {
        int i = i_start + threadIdx.x + ii * blockDim.x;
        if (i < n && i < chunk_size) {
          r_g[ii] = g[i];
          r_p[ii] = p[i];
          r_m[ii] = m[i];
          r_v[ii] = v[i];
        } else {
          // Out-of-range lanes compute on zeros and are not written back.
          r_g[ii] = MATH_T(0);
          r_p[ii] = MATH_T(0);
          r_m[ii] = MATH_T(0);
          r_v[ii] = MATH_T(0);
        }
      }
#pragma unroll
      for (int ii = 0; ii < ILP; ii++) {
        // Optionally unscale the gradient (e.g. after loss scaling).
        if (div_scale > 0) r_g[ii] /= div_scale;

        if (mode == ADAM_MODE_0) { // L2
          // L2 regularization: fold decay into the gradient before the moment updates.
          r_g[ii] = r_g[ii] + (decay * r_p[ii]);
          r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = next_m_unbiased / denom;
          r_p[ii] = r_p[ii] - (lr * update);
        } else { // weight decay
          // AdamW: apply decay directly to the parameter inside the update term.
          r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
          r_p[ii] = r_p[ii] - (lr * update);
        }
      }
#pragma unroll
      for (int ii = 0; ii < ILP; ii++) {
        int i = i_start + threadIdx.x + ii * blockDim.x;
        if (i < n && i < chunk_size) {
          p[i] = r_p[ii];
          m[i] = r_m[ii];
          v[i] = r_v[ii];
        }
      }
    }
  }
};
// Host entry point: runs the fused Adam update over a list of tensors.
// tensor_lists = {grads, params, exp_avgs, exp_avg_sqs}; mode selects L2 (0)
// vs. AdamW (1); bias_correction == 1 enables the 1 - beta^step correction;
// div_scale > 0 unscales gradients before the update (ignored otherwise).
void multi_tensor_adam_cuda(int chunk_size, at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr, const float beta1,
                            const float beta2, const float epsilon,
                            const int step, const int mode,
                            const int bias_correction, const float weight_decay,
                            const float div_scale) {
  using namespace at;

  // Handle bias correction mode
  float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
  if (bias_correction == 1) {
    bias_correction1 = 1 - ::pow(beta1, step);
    bias_correction2 = 1 - ::pow(beta2, step);
  }

  // Dispatch on the (gradient, parameter) scalar-type pair and launch the
  // multi-tensor apply kernel with the AdamFunctor.
  DISPATCH_FLOAT_AND_HALF_FOR_G_P(
      tensor_lists[0][0].scalar_type(), tensor_lists[1][0].scalar_type(), 0,
      "adam",
      multi_tensor_apply<4>(BLOCK_SIZE, chunk_size, noop_flag, tensor_lists,
                            AdamFunctor<g_scalar_t_0, p_scalar_t_0>(), beta1,
                            beta2, bias_correction1, bias_correction2, epsilon,
                            lr, (adamMode_t)mode, weight_decay, div_scale);)

  AT_CUDA_CHECK(hipGetLastError());
}
| b33741ce374078adc7399986efdf561e193a8c13.cu | // modified from
// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu
/* Copyright 2020 The Microsoft DeepSpeed Team
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit a109f85
Licensed under the MIT License.
*/
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "multi_tensor_apply.cuh"
#include "type_shim.h"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum {
ADAM_MODE_0 = 0, // L2 regularization mode
ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
using MATH_T = float;
// Fused Adam update functor applied per chunk by multi_tensor_apply.
// Tensor list layout: addresses[0] = gradients (T_g), addresses[1] = params,
// addresses[2] = exp_avg (m), addresses[3] = exp_avg_sq (v), all of type T_p.
// Params and both moments are updated in place; math is done in MATH_T (float).
template <typename T_g, typename T_p>
struct AdamFunctor {
  // chunk_size: elements per chunk; tl: tensor/chunk metadata for this block;
  // beta1/beta2: moment decay rates; beta1_correction/beta2_correction:
  // bias-correction denominators (1 - beta^step, or 1.0 when disabled);
  // mode: ADAM_MODE_0 = L2 regularization, ADAM_MODE_1 = decoupled weight
  // decay (AdamW); div_scale: gradient unscale divisor, ignored unless > 0.
  __device__ __forceinline__ void operator()(
      int chunk_size, volatile int *noop_gmem, TensorListMetadata<4> &tl,
      const float beta1, const float beta2, const float beta1_correction,
      const float beta2_correction, const float epsilon, const float lr,
      adamMode_t mode, const float decay, const float div_scale) {
    // I'd like this kernel to propagate infs/nans.
    // if(*noop_gmem == 1)
    //   return;

    // Locate the tensor and chunk assigned to this thread block.
    int tensor_loc = tl.block_to_tensor[blockIdx.x];

    // potentially use to pass in list of scalar
    // int tensor_num = tl.start_tensor_this_launch + tensor_loc;

    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    T_g *g = (T_g *)tl.addresses[0][tensor_loc];
    g += chunk_idx * chunk_size;

    T_p *p = (T_p *)tl.addresses[1][tensor_loc];
    p += chunk_idx * chunk_size;

    T_p *m = (T_p *)tl.addresses[2][tensor_loc];
    m += chunk_idx * chunk_size;

    T_p *v = (T_p *)tl.addresses[3][tensor_loc];
    v += chunk_idx * chunk_size;

    // Elements remaining from this chunk's start (may be < chunk_size at the tail).
    n -= chunk_idx * chunk_size;

    // see note in multi_tensor_scale_kernel.cu
    // Each iteration handles blockDim.x * ILP elements: load ILP values per
    // thread into registers, update them, then write them back.
    for (int i_start = 0; i_start < n && i_start < chunk_size;
         i_start += blockDim.x * ILP) {
      MATH_T r_g[ILP];
      MATH_T r_p[ILP];
      MATH_T r_m[ILP];
      MATH_T r_v[ILP];
#pragma unroll
      for (int ii = 0; ii < ILP; ii++) {
        int i = i_start + threadIdx.x + ii * blockDim.x;
        if (i < n && i < chunk_size) {
          r_g[ii] = g[i];
          r_p[ii] = p[i];
          r_m[ii] = m[i];
          r_v[ii] = v[i];
        } else {
          // Out-of-range lanes compute on zeros and are not written back.
          r_g[ii] = MATH_T(0);
          r_p[ii] = MATH_T(0);
          r_m[ii] = MATH_T(0);
          r_v[ii] = MATH_T(0);
        }
      }
#pragma unroll
      for (int ii = 0; ii < ILP; ii++) {
        // Optionally unscale the gradient (e.g. after loss scaling).
        if (div_scale > 0) r_g[ii] /= div_scale;

        if (mode == ADAM_MODE_0) { // L2
          // L2 regularization: fold decay into the gradient before the moment updates.
          r_g[ii] = r_g[ii] + (decay * r_p[ii]);
          r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = next_m_unbiased / denom;
          r_p[ii] = r_p[ii] - (lr * update);
        } else { // weight decay
          // AdamW: apply decay directly to the parameter inside the update term.
          r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
          r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
          MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
          MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
          MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
          MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
          r_p[ii] = r_p[ii] - (lr * update);
        }
      }
#pragma unroll
      for (int ii = 0; ii < ILP; ii++) {
        int i = i_start + threadIdx.x + ii * blockDim.x;
        if (i < n && i < chunk_size) {
          p[i] = r_p[ii];
          m[i] = r_m[ii];
          v[i] = r_v[ii];
        }
      }
    }
  }
};
// Host entry point: runs the fused Adam update over a list of tensors.
// tensor_lists = {grads, params, exp_avgs, exp_avg_sqs}; mode selects L2 (0)
// vs. AdamW (1); bias_correction == 1 enables the 1 - beta^step correction;
// div_scale > 0 unscales gradients before the update (ignored otherwise).
void multi_tensor_adam_cuda(int chunk_size, at::Tensor noop_flag,
                            std::vector<std::vector<at::Tensor>> tensor_lists,
                            const float lr, const float beta1,
                            const float beta2, const float epsilon,
                            const int step, const int mode,
                            const int bias_correction, const float weight_decay,
                            const float div_scale) {
  using namespace at;

  // Handle bias correction mode
  float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
  if (bias_correction == 1) {
    bias_correction1 = 1 - std::pow(beta1, step);
    bias_correction2 = 1 - std::pow(beta2, step);
  }

  // Dispatch on the (gradient, parameter) scalar-type pair and launch the
  // multi-tensor apply kernel with the AdamFunctor.
  DISPATCH_FLOAT_AND_HALF_FOR_G_P(
      tensor_lists[0][0].scalar_type(), tensor_lists[1][0].scalar_type(), 0,
      "adam",
      multi_tensor_apply<4>(BLOCK_SIZE, chunk_size, noop_flag, tensor_lists,
                            AdamFunctor<g_scalar_t_0, p_scalar_t_0>(), beta1,
                            beta2, bias_correction1, bias_correction2, epsilon,
                            lr, (adamMode_t)mode, weight_decay, div_scale);)

  AT_CUDA_CHECK(cudaGetLastError());
}
|
ad1a9f6f4c6c7d61534035d63a9c8fc1e926e77d.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run matrix multiplication kernels using functions and data structures
provided by CUTLASS using tensor cores; which we run on a NVIDIA Volta GPU.
Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing
high performance kernels at scale which works for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU
easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp
and thread-block level, they compute on their own tile-size with higher level of tile sizes being
composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In this example, we split variable initialization into
1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set matrices will be used to compute
output of matrix multiplication.
First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for
GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the
rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise
operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for
alpha and beta to be equal to ElementComputeEpilogue = float. As we want to MMA instructions on
Volta and they support only half-precision floating point (fp16 or half), we use data type for
elements in input matrix A and B as cutlass::half_t. Volta also supports accumulation of partial dot
product to fp32, which can store wider range of numbers, we use it as data type of output matrix
elements and accumulation. We convey this to CUTLASS kernel by initializing template variables
ElementAccumulator (float), ElementComputeEpilogue (float), ElementInputA (cutlass::half_t),
ElementInputB (cutlass::half_t), ElementOutput (float). Communicating just the data type is not
enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do
that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB
to row major and LayoutOutput to row major. Next, we set up rules to compute alpha * X + beta * C
which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the
data type of output ElementOutput (int32_t), the number of elements per vector memory access (16),
data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X +
beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x32,
64x64x32, 8x8x4 (MxNxK) respectively. When passed to instantiate the CUTLASS GEMM kernel, it internally
deduces the number of threads needed per thread-block, the amount of shared memory, how to store data in
a bank-conflict-free manner, and a ton of other variables required to compose, initialize and launch a
high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from
understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a CTA. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, store to global memory. The below flow
sequence shows a typical mma pipeline.
matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with single pipeline is, each stage is synchronous which means, each stage has to wait
until the previous finished executing. There are stages in the pipeline which do not have fixed
latency, for example, the loads from global memory and shared memory. Therefore, we can add one more
pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5)
mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global
memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers ->
(9) output to global memory
This way, you can hide the second global memory load latency by doing computation on already loaded
input data.
There are few more template variables initialized such as, which threadblock tile of output matrix
is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS GEMM kernel using
cutlass::gemm::device::Gemm template.
The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it.
We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come
in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the
important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space
memory required by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to intialize CUTLASS kernel then, the kernel is launched.
In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if
the output from CUTLASS kernel is same as reference GEMM kernel.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, N = 8, K = 4
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- identity swizzle: threadblocks map directly onto output tiles
// This code section describes the epilogue: each output tile computes alpha * accumulator + beta * C
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per
// vectorized memory access. For the
// float output used here that is
// 128 / 32 = 4 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Builds, runs, and verifies one CUTLASS GEMM (D = alpha * A * B + beta * C)
// on a Volta-class GPU using the Gemm type configured above.
// Returns 0 when the CUTLASS result matches the device reference GEMM (or when
// the GPU is unsupported, so CI treats that as a skip) and -1 on failure.
int run() {
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
// Compute capability 7.x covers sm_70/72/75, the targets of arch::Sm70 mma.sync.
if (props.major != 7) {
std::cerr << "Volta Tensor Ops must be run on a machine with compute capability of 70, 72, or 75."
<< std::endl;
// Return 0 so tests are considered passing if run on unsupported architectures or CUDA Toolkits.
return 0;
}
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions.
// The fixed seed (second argument, 1) keeps every run deterministic.
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions (no split-K reduction needed here)
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel (asynchronous; completion waited on below)
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not.
// NOTE(review): this is an exact (bit-wise) compare with no tolerance; it
// presumably relies on both paths accumulating in the same order -- confirm
// if it ever flakes on new hardware.
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
// Entry point: gate on the compiler version, then defer to run().
// Volta Tensor Core operations (mma.sync) first shipped in CUDA 10.1, so this
// example must be built with the CUDA 10.1 Toolkit or newer.
int main() {
const bool toolkit_ok =
__CUDACC_VER_MAJOR__ > 10 ||
(__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1);
if (!toolkit_ok) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
// Return zero on older Toolkits so tests pass; the example becomes a no-op.
return 0;
}
return run();
}
| ad1a9f6f4c6c7d61534035d63a9c8fc1e926e77d.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run matrix multiplication kernels using functions and data structures
provided by CUTLASS using tensor cores; which we run on a NVIDIA Volta GPU.
Writing a single high performance matrix multiplication kernel is hard but do-able. Whereas writing
high performance kernels at scale which works for multiple problem sizes with good abstractions is
really hard. CUTLASS solves this problem by providing simplified abstractions to compose
multiple sections of gemm kernel. When used properly, the kernels can hit peak performance of GPU
easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at each thread, warp
and thread-block level, they compute on their own tile-size with higher level of tile sizes being
composed from lower level ones. Multiple thread-tiles (tile size each thread computes) can be used
to form warp-tiles (tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In this example, we split variable initialization into
1. Setting up data properties : describes how matrices are laid out in the memory and how the kernel
can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set matrices will be used to compute
output of matrix multiplication.
First, we setup the data types of matrices A, B, C and D along with alpha, beta as the equation for
GEMM is D = alpha * A * B + beta * C. In CUTLASS, the kernels first compute A * B and leaves the
rest of the computation to end of the kernel as alpha * X + beta * C is a simple element-wise
operation on X (A * B) and C. We call this as epilogue of kernel. Hence, we setup data types for
alpha and beta to be equal to ElementComputeEpilogue = float. As we want to MMA instructions on
Volta and they support only half-precision floating point (fp16 or half), we use data type for
elements in input matrix A and B as cutlass::half_t. Volta also supports accumulation of partial dot
product to fp32, which can store wider range of numbers, we use it as data type of output matrix
elements and accumulation. We convey this to CUTLASS kernel by initializing template variables
ElementAccumulator (float), ElementComputeEpilogue (float), ElementInputA (cutlass::half_t),
ElementInputB (cutlass::half_t), ElementOutput (float). Communicating just the data type is not
enough. As the data is laid out linearly in memory, we have to convey the layout of matrices. We do
that by initializing template variable LayoutInputA to column major cutlass variable, LayoutInputB
to row major and LayoutOutput to row major. Next, we setup rules to comptue alpha * X + beta * C
which is called epilogue of the kernel. We initialize template variable EpilogueOp, which takes the
data type of output ElementOutput (int32_t), the number of elements per vector memory access (16),
data type of accumulator (int32_t) and data type of computation of linear combination (alpha * X +
beta * C).
Now that we setup the properties of data, we have to setup properties of computation.
Second, we create template variables of tile sizes for thread-block, warp and mma-op to 128x128x32,
64x64x32, 8x8x4 (MxNxK) respectively. When passed to instantiate CUTLASS GEMM kernel, it internally
deduce the amount of threads needed per thread-block, amount of shared memory, storing data in
bank-conflict free manner, and ton of other variables required to compose, intialize and launch a
high performance GEMM kernel. This is the beauty of CUTLASS, it relieves developer from
understanding and coding complicated hardware optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a CTA. What are MMA pipelines? MMA pipelines
constitute the whole process of loading input data from global memory to shared memory, loading data
from shared memory to registers, doing matrix multiplication, store to global memory. The below flow
sequence shows a typical mma pipeline.
matrix in global memory -> registers -> tile in shared memory -> registers -> mma -> registers ->
output to global memory
The problem with single pipeline is, each stage is synchronous which means, each stage has to wait
until the previous finished executing. There are stages in the pipeline which do not have fixed
latency, for example, the loads from global memory and shared memory. Therefore, we can add one more
pipeline with a phase shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4) registers -> (5)
mma -> (6) registers -> (7) output to global memory (1) <null> -> (2) <null> -> (3) matrix in global
memory -> (4) registers -> (5) tile in shared memory -> (6) registers -> (7) mma -> (8) registers ->
(9) output to global memory
This way, you can hide the second global memory load latency by doing computation on already loaded
input data.
There are few more template variables initialized such as, which threadblock tile of output matrix
is done which threadblock launched on an SM, CUDA SM architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS GEMM kernel using
cutlass::gemm::device::Gemm template.
The next step is to intialize physical data, instantiate and initialize CUTLASS kernel and run it.
We use CUTLASS utilities to initialize, fill, compare matrices as they are simple and doesn't come
in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, create arguments tuple to launch CUTLASS
kernel which takes problem size (M = 5120, N = 4096 and K = 4096), matrices, alpha, beta and the
important one, split k-dimension factor. Along with that, we query CUTLASS if any scratch-space
memory required by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to intialize CUTLASS kernel then, the kernel is launched.
In this example, we later on launch a reference gemm kernel (from CUTLASS utilities) to compare if
the output from CUTLASS kernel is same as reference GEMM kernel.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8, N = 8, K = 4
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- identity swizzle: threadblocks map directly onto output tiles
// This code section describes the epilogue: each output tile computes alpha * accumulator + beta * C
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per
// vectorized memory access. For the
// float output used here that is
// 128 / 32 = 4 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Builds, runs, and verifies one CUTLASS GEMM (D = alpha * A * B + beta * C)
// on a Volta-class GPU using the Gemm type configured above.
// Returns 0 when the CUTLASS result matches the device reference GEMM (or when
// the GPU is unsupported, so CI treats that as a skip) and -1 on failure.
int run() {
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
// Compute capability 7.x covers sm_70/72/75, the targets of arch::Sm70 mma.sync.
if (props.major != 7) {
std::cerr << "Volta Tensor Ops must be run on a machine with compute capability of 70, 72, or 75."
<< std::endl;
// Return 0 so tests are considered passing if run on unsupported architectures or CUDA Toolkits.
return 0;
}
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions.
// The fixed seed (second argument, 1) keeps every run deterministic.
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions (no split-K reduction needed here)
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel (asynchronous; completion waited on below)
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not.
// NOTE(review): this is an exact (bit-wise) compare with no tolerance; it
// presumably relies on both paths accumulating in the same order -- confirm
// if it ever flakes on new hardware.
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
// Entry point: gate on the compiler version, then defer to run().
// Volta Tensor Core operations (mma.sync) first shipped in CUDA 10.1, so this
// example must be built with the CUDA 10.1 Toolkit or newer.
int main() {
const bool toolkit_ok =
__CUDACC_VER_MAJOR__ > 10 ||
(__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1);
if (!toolkit_ok) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
// Return zero on older Toolkits so tests pass; the example becomes a no-op.
return 0;
}
return run();
}
|
01ca490140682b604af8a3073c7ddc2f124fbaf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
/* Calculate SAXPY, single-precision vector math: y[i] = a*x[i] + y[i].
   Expects a 1-D launch with enough blocks*threads to cover n elements
   (n must fit in an int). */
__global__
void saxpy (int n, float a, float *x, float *y) {
    /* Global index across the whole grid.  The original used only
       threadIdx.x, so any element beyond the first block was skipped. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    /* Guard the tail: n is rarely an exact multiple of the block size */
    if (i < n) {
        y[i] = a*x[i] + y[i];
    }
}

int main(int argc, char **argv) {
    long long i;   /* long long so the init loop cannot overflow for huge N */
    float *x, *y, *dev_x, *dev_y;
    float a;
    long long N = (1000*1000*8);

    if (argc > 1) {
        N = atoll(argv[1]);
    }

    /* Allocate vectors on CPU (and actually check the result) */
    x = (float *)malloc(N*sizeof(float));
    y = (float *)malloc(N*sizeof(float));
    if (x == NULL || y == NULL) {
        fprintf(stderr, "Host allocation of %lld floats failed\n", N);
        exit(-1);
    }

    /* Allocate vectors on GPU */
    if (hipMalloc((void **)&dev_x, N*sizeof(float)) != hipSuccess ||
        hipMalloc((void **)&dev_y, N*sizeof(float)) != hipSuccess) {
        fprintf(stderr, "Device allocation failed\n");
        exit(-1);
    }

    /* Initialize the host vectors */
    for (i = 0; i < N; i++) {
        x[i] = (float)i;
        y[i] = (float)(10.0*i);
    }

    hipMemcpy(dev_x, x, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_y, y, N*sizeof(float), hipMemcpyHostToDevice);

    /* BUGFIX: the original launched <<<1, N>>>, i.e. a single block of N
       threads.  For the default N = 8,000,000 that exceeds the 1024
       threads-per-block hardware limit, so the launch always failed with
       an invalid-configuration error.  Launch ceil(N/256) blocks of 256
       threads instead. */
    long long blocks = (N + 255) / 256;
    /* BUGFIX: (N+255)/256 is a long long, so it needs %lld, not %d */
    printf("Size: %lld\n", blocks);

    a = 5.0;
    /* Perform SAXPY */
    hipLaunchKernelGGL(( saxpy), dim3((unsigned int)blocks), dim3(256), 0, 0, (int)N, a, dev_x, dev_y);

    // make the host block until the device is finished
    hipDeviceSynchronize();

    // check for error
    hipError_t error = hipGetLastError();
    if (error != hipSuccess) {
        printf("CUDA error: %s\n", hipGetErrorString(error));
        exit(-1);
    }

    hipMemcpy(y, dev_y, N*sizeof(float), hipMemcpyDeviceToHost);

    /* results */
    i = 100;
    printf("y[%lld]=%f, y[%lld]=%f\n", i, y[i], N-1, y[N-1]);
    /* y[i]=a*x[i]+y[i] */
    /* 0:   a=5, x=0,   y=0    ::::::: y=0 */
    /* 1:   a=5, x=1,   y=10   ::::::: y=15 */
    /* 2:   a=5, x=2,   y=20   ::::::: y=30 */
    /* 3:   a=5, x=3,   y=30   ::::::: y=45 */
    /* 4:   a=5, x=4,   y=40   ::::::: y=60 */
    /* ... */
    /* 100: a=5, x=100, y=1000         y=1500 */

    hipFree(dev_x);
    hipFree(dev_y);
    free(x);
    free(y);
    return 0;
}
| 01ca490140682b604af8a3073c7ddc2f124fbaf5.cu | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
/* Calculate SAXPY, single-precision vector math: y[i] = a*x[i] + y[i].
   Expects a 1-D launch with enough blocks*threads to cover n elements
   (n must fit in an int). */
__global__
void saxpy (int n, float a, float *x, float *y) {
    /* Global index across the whole grid.  The original used only
       threadIdx.x, so any element beyond the first block was skipped. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    /* Guard the tail: n is rarely an exact multiple of the block size */
    if (i < n) {
        y[i] = a*x[i] + y[i];
    }
}

int main(int argc, char **argv) {
    long long i;   /* long long so the init loop cannot overflow for huge N */
    float *x, *y, *dev_x, *dev_y;
    float a;
    long long N = (1000*1000*8);

    if (argc > 1) {
        N = atoll(argv[1]);
    }

    /* Allocate vectors on CPU (and actually check the result) */
    x = (float *)malloc(N*sizeof(float));
    y = (float *)malloc(N*sizeof(float));
    if (x == NULL || y == NULL) {
        fprintf(stderr, "Host allocation of %lld floats failed\n", N);
        exit(-1);
    }

    /* Allocate vectors on GPU */
    if (cudaMalloc((void **)&dev_x, N*sizeof(float)) != cudaSuccess ||
        cudaMalloc((void **)&dev_y, N*sizeof(float)) != cudaSuccess) {
        fprintf(stderr, "Device allocation failed\n");
        exit(-1);
    }

    /* Initialize the host vectors */
    for (i = 0; i < N; i++) {
        x[i] = (float)i;
        y[i] = (float)(10.0*i);
    }

    cudaMemcpy(dev_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y, y, N*sizeof(float), cudaMemcpyHostToDevice);

    /* BUGFIX: the original launched <<<1, N>>>, i.e. a single block of N
       threads.  For the default N = 8,000,000 that exceeds the 1024
       threads-per-block hardware limit, so the launch always failed with
       an invalid-configuration error.  Launch ceil(N/256) blocks of 256
       threads instead. */
    long long blocks = (N + 255) / 256;
    /* BUGFIX: (N+255)/256 is a long long, so it needs %lld, not %d */
    printf("Size: %lld\n", blocks);

    a = 5.0;
    /* Perform SAXPY */
    saxpy<<<(unsigned int)blocks, 256>>>((int)N, a, dev_x, dev_y);

    // make the host block until the device is finished
    cudaDeviceSynchronize();

    // check for error
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }

    cudaMemcpy(y, dev_y, N*sizeof(float), cudaMemcpyDeviceToHost);

    /* results */
    i = 100;
    printf("y[%lld]=%f, y[%lld]=%f\n", i, y[i], N-1, y[N-1]);
    /* y[i]=a*x[i]+y[i] */
    /* 0:   a=5, x=0,   y=0    ::::::: y=0 */
    /* 1:   a=5, x=1,   y=10   ::::::: y=15 */
    /* 2:   a=5, x=2,   y=20   ::::::: y=30 */
    /* 3:   a=5, x=3,   y=30   ::::::: y=45 */
    /* 4:   a=5, x=4,   y=40   ::::::: y=60 */
    /* ... */
    /* 100: a=5, x=100, y=1000         y=1500 */

    cudaFree(dev_x);
    cudaFree(dev_y);
    free(x);
    free(y);
    return 0;
}
|
additive_smoothl1_layer.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/additive_smoothl1_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Clips each element of `in` to +/- 1/sigma2 and writes ONLY the clipped
// values into `out`; elements with |in| <= 1/sigma2 leave out[index]
// untouched.  Forward_gpu below passes in == out (aliased to the freshly
// computed difference), so the untouched entries keep the raw difference.
// NOTE(review): the silent else-branch only works because of that aliasing;
// with distinct in/out buffers the unclipped entries would be stale --
// confirm this is intended before reusing the kernel elsewhere.
// NOTE(review): 1.0 / sigma2 promotes the compare to double even when
// Dtype is float; consider Dtype(1) / sigma2 for speed.
template <typename Dtype>
__global__ void AdditiveSmoothL1Forward(const int n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
// |x| - 0.5 / sigma / sigma otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val > 1.0 / sigma2) {
// Clamp to the sign-preserving bound 1/sigma2.
out[index] = val > 0 ? 1./sigma2 : -1./sigma2;
}
}
}
// Forward pass: top[0] = clip(bottom[0] - bottom[1], +/- 1/sigma2_).
// First computes the element-wise difference directly into top[0], then
// launches AdditiveSmoothL1Forward with in == out aliased to top[0], so
// entries within the clipping bound keep the raw difference.
template <typename Dtype>
void AdditiveSmoothL1Layer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
top[0]->mutable_gpu_data()); // d := b0 - b1
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AdditiveSmoothL1Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top[0]->gpu_data(), top[0]->mutable_gpu_data(), sigma2_);
// Surfaces any asynchronous launch/execution error from the kernel above.
CUDA_POST_KERNEL_CHECK;
}
// Backward pass is not supported for this layer: abort via NOT_IMPLEMENTED
// as soon as any bottom blob requests a gradient.
template <typename Dtype>
void AdditiveSmoothL1Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const bool wants_gradient = propagate_down[0] || propagate_down[1];
  if (wants_gradient) {
    NOT_IMPLEMENTED;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(AdditiveSmoothL1Layer);
} // namespace caffe
| additive_smoothl1_layer.cu | #include <vector>
#include "caffe/layers/additive_smoothl1_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Kernel: clamp step of the "additive" smooth-L1 forward pass.
// Launched with caffe's standard grid (CAFFE_GET_BLOCKS/CAFFE_CUDA_NUM_THREADS);
// CUDA_KERNEL_LOOP is caffe's grid-stride loop macro, so any grid size is valid.
// NOTE(review): only the |x| > 1/sigma2 branch writes `out`.  The call site in
// this file passes the same buffer as `in` and `out`, so in-band values are
// deliberately left unchanged — confirm that aliasing before adding new callers.
template <typename Dtype>
__global__ void AdditiveSmoothL1Forward(const int n, const Dtype* in, Dtype* out,
Dtype sigma2) {
// f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
// |x| - 0.5 / sigma / sigma otherwise
CUDA_KERNEL_LOOP(index, n) {
Dtype val = in[index];
Dtype abs_val = abs(val);
if (abs_val > 1.0 / sigma2) {
// saturate to the signed threshold +-1/sigma2
out[index] = val > 0 ? 1./sigma2 : -1./sigma2;
}
}
}
// Forward pass: top := bottom[0] - bottom[1] (elementwise, via caffe_gpu_sub),
// then AdditiveSmoothL1Forward clamps the result in place to +-1/sigma2_.
// Note the kernel receives top's buffer as both input and output.
template <typename Dtype>
void AdditiveSmoothL1Layer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
top[0]->mutable_gpu_data()); // d := b0 - b1
// NOLINT_NEXT_LINE(whitespace/operators)
AdditiveSmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, top[0]->gpu_data(), top[0]->mutable_gpu_data(), sigma2_);
CUDA_POST_KERNEL_CHECK;  // surfaces launch/config errors immediately
}
// Backward pass is intentionally unsupported: this layer aborts (NOT_IMPLEMENTED)
// if either bottom blob requests gradients.
template <typename Dtype>
void AdditiveSmoothL1Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0] || propagate_down[1] ){
NOT_IMPLEMENTED;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AdditiveSmoothL1Layer);
} // namespace caffe
|
1b645f78301978b7e22faf02a8325c5de7f9f706.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
//#include<cuda.h>
#include<cuda_runtime.h>
#define N 4
#define BLOCK_DIM 4
__global__ void matrixAdd (int *dev_a);
// Reads an N x N matrix from stdin, copies it to the device, and launches
// matrixAdd, which prints each element from the GPU.
// Returns 0 on success, 1 on malformed input.
int main() {
    int a[N*N] = {};
    int i;
    // N*N instead of the hard-coded 16 so the loop tracks the N macro.
    for (i = 0; i < N*N; i++) {
        printf("enter the number");
        if (scanf("%d", &a[i]) != 1) {   // reject malformed input instead of reading garbage
            printf("invalid input\n");
            return 1;
        }
    }
    int *dev_a;
    int size = N * N * sizeof(int);
    hipMalloc((void**)&dev_a, size);
    hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    // Integer ceil-division.  The original (int)ceil(N/dimBlock.x) performed
    // integer division *before* ceil, under-counting blocks whenever N is not
    // a multiple of BLOCK_DIM.
    dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
    hipLaunchKernelGGL(matrixAdd, dimGrid, dimBlock, 0, 0, dev_a);
    hipMemcpy(a, dev_a, size, hipMemcpyDeviceToHost);
    hipFree(dev_a);
    return 0;
}
// Kernel: each thread prints one element of the N x N matrix (row-major).
// Despite the name, no addition happens — the arithmetic was commented out and
// only the debug printf remains.  The (col < N && row < N) guard keeps excess
// threads in partially filled blocks from reading out of bounds.
__global__ void matrixAdd (int *dev_a) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index = col + row * N;  // row-major flat index
//dev_b=index;
//}
if (col < N && row < N) {
//c[index] = a[index] + b[index];
printf("%d",dev_a[index]);
}
}
| 1b645f78301978b7e22faf02a8325c5de7f9f706.cu | #include<stdio.h>
//#include<cuda.h>
#include<cuda_runtime.h>
#define N 4
#define BLOCK_DIM 4
__global__ void matrixAdd (int *dev_a);
// Reads an N x N matrix from stdin, copies it to the device, and launches
// matrixAdd, which prints each element from the GPU.
// Returns 0 on success, 1 on malformed input.
int main() {
    int a[N*N] = {};
    int i;
    // N*N instead of the hard-coded 16 so the loop tracks the N macro.
    for (i = 0; i < N*N; i++) {
        printf("enter the number");
        if (scanf("%d", &a[i]) != 1) {   // reject malformed input instead of reading garbage
            printf("invalid input\n");
            return 1;
        }
    }
    int *dev_a;
    int size = N * N * sizeof(int);
    cudaMalloc((void**)&dev_a, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    // Integer ceil-division.  The original (int)ceil(N/dimBlock.x) performed
    // integer division *before* ceil, under-counting blocks whenever N is not
    // a multiple of BLOCK_DIM.
    dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
    matrixAdd<<<dimGrid, dimBlock>>>(dev_a);
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    return 0;
}
// Kernel: each thread prints one element of the N x N matrix (row-major).
// No addition happens here — the arithmetic was commented out upstream and
// only the debug printf remains.  The bounds guard keeps excess threads in
// partially filled blocks from reading past the array.
__global__ void matrixAdd (int *dev_a) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x;  // column
    const int r = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (r < N && c < N) {
        printf("%d", dev_a[r * N + c]);
    }
}
|
d78263a378a5d86b28a4b4ec1029462ff22a7e64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<bits/stdc++.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// KNN distance kernel.  Each thread owns one dataset row (global id):
// it computes the squared L2 distance between its row and the query row
// (d_query is 1-based), ranks that distance against the other 255 threads of
// its block, and — if it is among the block's k smallest — writes the distance
// and the row's label (last feature column) to d_dist/d_label at
// blockIdx.x*k + rank.  The host then k-way-merges the per-block candidates.
// Preconditions established by the call site, not checked here:
//   blockDim.x == 256 (ranking loop and shared arrays assume it),
//   d_numfeatures <= 50 (query_obj capacity).
// NOTE(review): there is no `id < rows` guard — tail threads of the last block
// read past the training rows.  Verify grid sizing keeps them inside the
// allocated feature array.
__global__ void kernel_distance(double* dfeature_arr,int d_numfeatures,int d_query,double* d_dist, double* d_label,int k)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int tid = threadIdx.x;
__shared__ double d_arr[258];   // per-thread distances (only [0,256) used)
__shared__ int check[258];      // participation flags (all set to 1 below)
__shared__ double query_obj[50]; // 50 -> num_features
double d1, d2;
check[tid] = 1;
int rank,j;
int start = id*d_numfeatures;
int end = start + d_numfeatures-1;  // last column is the label, excluded from the distance
int i;
double dis = 0;
int start_q = (d_query-1)*d_numfeatures;
// Stage the query row in shared memory (one feature per thread).
if(tid<d_numfeatures)
query_obj[tid] = dfeature_arr[start_q + tid];
__syncthreads();
for(i = start, j= 0;i<end;i++, j++)
{
d1 = dfeature_arr[i];
d2 = query_obj[j];
dis += (d1 - d2)*(d1 - d2);
//start_q++;
}
//d_dist[id] = dis;
d_arr[tid] = dis;
//printf("\n Label = %lf",dfeature_arr[end]);
__syncthreads();
// Rank = number of block-mates with a strictly smaller distance; ties are
// broken by thread index so ranks are unique.
rank = 0;
for(i=0;i<256;i++)
{
//if(check[i]==1 && dis>d_arr[i]&& i!=tid)
// rank++;
if(check[i]==1 &&i!=tid )
{
if(dis>d_arr[i])
rank++;
if(dis==d_arr[i]&&tid>i)
rank++;
}
}
// The k best threads of this block publish their candidate.
if(rank<k)
{
d_dist[blockIdx.x*k+rank] = dis;
d_label[blockIdx.x*k + rank] = dfeature_arr[end];
//printf("\n BlockID = %d Rank = %d dist = %lf label = %lf",blockIdx.x,rank,dis,d_label[blockIdx.x*k + rank]);
}
}
/* Returns the num-th (1-based) ';'-separated field of line, or NULL when the
 * line has fewer fields.  Mutates line in place via strtok, so callers must
 * pass a writable copy (this file uses strdup).  A trailing newline is
 * stripped from every field after the first. */
const char* getfield(char* line, int num)
{
    int remaining = num;
    const char* token = strtok(line, ";");
    while (token != NULL && *token != '\0') {
        remaining--;
        if (remaining == 0) {
            return token;
        }
        token = strtok(NULL, ";\n");
    }
    return NULL;
}
// KNN driver: parses winequality-red.csv (';'-separated), uploads all rows to
// the GPU, then for k = 3..20 launches kernel_distance to get each block's k
// best candidates and k-way-merges them on the host, timing kernel (hip events)
// plus merge (clock) per k.  Prints "k  total_ms" lines.
// NOTE(review): fopen/malloc results are not checked (missing CSV -> NULL
// deref), and the strdup calls inside the parse loops leak all but the last
// copy per line.  Only the final `sum` (mean neighbor label) is computed; it
// is never printed (the printf is commented out).
main()
{
FILE* stream = fopen("winequality-red.csv", "r");
char line[1024];
int cnt = 1,num_features,itr =-1,i,index = 0;
double *feature_arr;
feature_arr = (double *)malloc(sizeof(double)*250000);
while (fgets(line, 1024, stream)!=NULL)
{
char* tmp = strdup(line);
if(itr==-1)
{
// First line: count fields to learn num_features.
while(getfield(tmp,cnt)!=NULL)
{
cnt++;
tmp = strdup(line);
}
num_features = cnt -1;
printf("\n Number of features = %d",num_features);
itr++;
}
else
{
// Data line: append all fields (label included) to the flat array.
for(i=1;i<=num_features;i++)
{
//printf("feaure cnt = %d",i);
feature_arr[index] = atof(getfield(tmp,i));
index++;
tmp = strdup(line);
}
itr++;
}
free(tmp);
}
fclose(stream);
printf("\n Reading done");
double *dfeature_arr, *d_dist,*d_label;
hipEvent_t st, stop;
hipEventCreate(&st);
hipEventCreate(&stop);
hipMalloc((void **)&dfeature_arr,itr*num_features*sizeof(double));
hipMemcpy(dfeature_arr,feature_arr,itr*num_features*sizeof(double),hipMemcpyHostToDevice);
int train = 0.8*(float)itr;  // 80/20 split; query row sits in the held-out 20%
printf("\n %d",train);
int query = train + 10;
int k ;
//printf("\nEnter value of k - ");
//scanf("%d",&k);
//for(k=3;k<=20;k++){
double h_dist[train];
double h_label[train];
hipMalloc((void **)&d_dist,train*sizeof(double));
hipMalloc((void **)&d_label,train*sizeof(double));
int num_threads =256;
int num_blocks = ceil((float)train/num_threads);
for(k=3;k<=20;k++){
hipEventRecord(st);
hipLaunchKernelGGL(( kernel_distance), dim3(num_blocks),dim3(num_threads), 0, 0, dfeature_arr,num_features,query,d_dist,d_label,k);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, st, stop);
hipMemcpy(h_dist,d_dist,sizeof(double)*train,hipMemcpyDeviceToHost);
hipMemcpy(h_label,d_label,sizeof(double)*train,hipMemcpyDeviceToHost);
int start[num_blocks];
double knn[k],mi;
int kid = 0;
int j,ind;
/*for(i=0;i<k*num_blocks;i++)
{
printf("\n %lf %lf",h_label[i],h_dist[i]);
}*/
clock_t s, e;
s = clock();
// Host-side k-way merge: each block contributed k sorted candidates at
// offset k*i; repeatedly take the global minimum head.
for(i=0;i<num_blocks;i++)
{
start[i] = k*i;
//printf("\n i = %d, start = %d",i,start[i]);
}
for(i=0;i<k;i++)
{
mi = 1000;  // assumes all candidate distances < 1000 — TODO confirm for other datasets
for(j=0;j<num_blocks;j++)
{
if(mi>h_dist[start[j]])
{
mi = h_dist[start[j]];
ind = j;
}
}
//start[j]++;
//printf("\n Distance = %lf label = %lf",mi,h_label[start[ind]]);
knn[kid] = h_label[start[ind]];
kid++;
start[ind]++;
}
double sum=0.0;
for(i=0;i<k;i++)
{
//printf("\n result = %lf",h_label[i]);
sum+=knn[i];
}
//printf("\n Sum = %lf",sum);
sum = sum/(double)k;
e = clock();
//printf("\nLabel = %lf",sum);
printf("\n %d\t%lf",k,((double) (e - s))* 1000.0 / CLOCKS_PER_SEC + (double)milliseconds);}
}
| d78263a378a5d86b28a4b4ec1029462ff22a7e64.cu | #include<bits/stdc++.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// KNN distance kernel.  Each thread owns one dataset row (global id):
// it computes the squared L2 distance between its row and the query row
// (d_query is 1-based), ranks that distance against the other 255 threads of
// its block, and — if it is among the block's k smallest — writes the distance
// and the row's label (last feature column) to d_dist/d_label at
// blockIdx.x*k + rank.  The host then k-way-merges the per-block candidates.
// Preconditions established by the call site, not checked here:
//   blockDim.x == 256 (ranking loop and shared arrays assume it),
//   d_numfeatures <= 50 (query_obj capacity).
// NOTE(review): there is no `id < rows` guard — tail threads of the last block
// read past the training rows.  Verify grid sizing keeps them inside the
// allocated feature array.
__global__ void kernel_distance(double* dfeature_arr,int d_numfeatures,int d_query,double* d_dist, double* d_label,int k)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int tid = threadIdx.x;
__shared__ double d_arr[258];   // per-thread distances (only [0,256) used)
__shared__ int check[258];      // participation flags (all set to 1 below)
__shared__ double query_obj[50]; // 50 -> num_features
double d1, d2;
check[tid] = 1;
int rank,j;
int start = id*d_numfeatures;
int end = start + d_numfeatures-1;  // last column is the label, excluded from the distance
int i;
double dis = 0;
int start_q = (d_query-1)*d_numfeatures;
// Stage the query row in shared memory (one feature per thread).
if(tid<d_numfeatures)
query_obj[tid] = dfeature_arr[start_q + tid];
__syncthreads();
for(i = start, j= 0;i<end;i++, j++)
{
d1 = dfeature_arr[i];
d2 = query_obj[j];
dis += (d1 - d2)*(d1 - d2);
//start_q++;
}
//d_dist[id] = dis;
d_arr[tid] = dis;
//printf("\n Label = %lf",dfeature_arr[end]);
__syncthreads();
// Rank = number of block-mates with a strictly smaller distance; ties are
// broken by thread index so ranks are unique.
rank = 0;
for(i=0;i<256;i++)
{
//if(check[i]==1 && dis>d_arr[i]&& i!=tid)
// rank++;
if(check[i]==1 &&i!=tid )
{
if(dis>d_arr[i])
rank++;
if(dis==d_arr[i]&&tid>i)
rank++;
}
}
// The k best threads of this block publish their candidate.
if(rank<k)
{
d_dist[blockIdx.x*k+rank] = dis;
d_label[blockIdx.x*k + rank] = dfeature_arr[end];
//printf("\n BlockID = %d Rank = %d dist = %lf label = %lf",blockIdx.x,rank,dis,d_label[blockIdx.x*k + rank]);
}
}
/* Returns the num-th (1-based) ';'-separated field of line, or NULL when the
 * line has fewer fields.  Mutates line in place via strtok, so callers must
 * pass a writable copy (this file uses strdup).  A trailing newline is
 * stripped from every field after the first. */
const char* getfield(char* line, int num)
{
    int remaining = num;
    const char* token = strtok(line, ";");
    while (token != NULL && *token != '\0') {
        remaining--;
        if (remaining == 0) {
            return token;
        }
        token = strtok(NULL, ";\n");
    }
    return NULL;
}
// KNN driver: parses winequality-red.csv (';'-separated), uploads all rows to
// the GPU, then for k = 3..20 launches kernel_distance to get each block's k
// best candidates and k-way-merges them on the host, timing kernel (CUDA
// events) plus merge (clock) per k.  Prints "k  total_ms" lines.
// NOTE(review): fopen/malloc results are not checked (missing CSV -> NULL
// deref), and the strdup calls inside the parse loops leak all but the last
// copy per line.  Only the final `sum` (mean neighbor label) is computed; it
// is never printed (the printf is commented out).
main()
{
FILE* stream = fopen("winequality-red.csv", "r");
char line[1024];
int cnt = 1,num_features,itr =-1,i,index = 0;
double *feature_arr;
feature_arr = (double *)malloc(sizeof(double)*250000);
while (fgets(line, 1024, stream)!=NULL)
{
char* tmp = strdup(line);
if(itr==-1)
{
// First line: count fields to learn num_features.
while(getfield(tmp,cnt)!=NULL)
{
cnt++;
tmp = strdup(line);
}
num_features = cnt -1;
printf("\n Number of features = %d",num_features);
itr++;
}
else
{
// Data line: append all fields (label included) to the flat array.
for(i=1;i<=num_features;i++)
{
//printf("feaure cnt = %d",i);
feature_arr[index] = atof(getfield(tmp,i));
index++;
tmp = strdup(line);
}
itr++;
}
free(tmp);
}
fclose(stream);
printf("\n Reading done");
double *dfeature_arr, *d_dist,*d_label;
cudaEvent_t st, stop;
cudaEventCreate(&st);
cudaEventCreate(&stop);
cudaMalloc((void **)&dfeature_arr,itr*num_features*sizeof(double));
cudaMemcpy(dfeature_arr,feature_arr,itr*num_features*sizeof(double),cudaMemcpyHostToDevice);
int train = 0.8*(float)itr;  // 80/20 split; query row sits in the held-out 20%
printf("\n %d",train);
int query = train + 10;
int k ;
//printf("\nEnter value of k - ");
//scanf("%d",&k);
//for(k=3;k<=20;k++){
double h_dist[train];
double h_label[train];
cudaMalloc((void **)&d_dist,train*sizeof(double));
cudaMalloc((void **)&d_label,train*sizeof(double));
int num_threads =256;
int num_blocks = ceil((float)train/num_threads);
for(k=3;k<=20;k++){
cudaEventRecord(st);
kernel_distance<<<num_blocks,num_threads>>>(dfeature_arr,num_features,query,d_dist,d_label,k);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, st, stop);
cudaMemcpy(h_dist,d_dist,sizeof(double)*train,cudaMemcpyDeviceToHost);
cudaMemcpy(h_label,d_label,sizeof(double)*train,cudaMemcpyDeviceToHost);
int start[num_blocks];
double knn[k],mi;
int kid = 0;
int j,ind;
/*for(i=0;i<k*num_blocks;i++)
{
printf("\n %lf %lf",h_label[i],h_dist[i]);
}*/
clock_t s, e;
s = clock();
// Host-side k-way merge: each block contributed k sorted candidates at
// offset k*i; repeatedly take the global minimum head.
for(i=0;i<num_blocks;i++)
{
start[i] = k*i;
//printf("\n i = %d, start = %d",i,start[i]);
}
for(i=0;i<k;i++)
{
mi = 1000;  // assumes all candidate distances < 1000 — TODO confirm for other datasets
for(j=0;j<num_blocks;j++)
{
if(mi>h_dist[start[j]])
{
mi = h_dist[start[j]];
ind = j;
}
}
//start[j]++;
//printf("\n Distance = %lf label = %lf",mi,h_label[start[ind]]);
knn[kid] = h_label[start[ind]];
kid++;
start[ind]++;
}
double sum=0.0;
for(i=0;i<k;i++)
{
//printf("\n result = %lf",h_label[i]);
sum+=knn[i];
}
//printf("\n Sum = %lf",sum);
sum = sum/(double)k;
e = clock();
//printf("\nLabel = %lf",sum);
printf("\n %d\t%lf",k,((double) (e - s))* 1000.0 / CLOCKS_PER_SEC + (double)milliseconds);}
}
|
2b221e1e99f5ff6089b8f9e7fdaf6984ed83930b.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// Nearest-neighbor search for chamfer distance.  For every point of xyz
// (layout (b, n, 3), row-major floats) finds the closest point of xyz2
// ((b, m, 3)) by squared L2 distance, writing the distance to result[b*n]
// and the argmin index to result_i[b*n].
// Strategy: tiles xyz2 through shared memory 512 points at a time; each
// thread scans the tile with a manual 4-way unroll.  Grid: blockIdx.x strides
// batches, (blockIdx.y, threadIdx.x) stride the n query points.
__global__
void ChamferDistanceKernel(
int b,
int n,
const float* xyz,
int m,
const float* xyz2,
float* result,
int* result_i)
{
const int batch=512;  // tile size: points of xyz2 staged per iteration
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;  // points in this (possibly partial) tile
// Cooperative, coalesced copy of the tile into shared memory.
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);  // largest multiple of 4 <= end_k
if (end_ka==batch){
// Full tile: fixed trip count lets the compiler unroll aggressively.
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
// Partial tile: same unrolled body, variable trip count.
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
// Remainder loop for the 0-3 points the unroll did not cover.
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
// First tile initializes the running minimum; later tiles only improve it.
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
// Barrier before the next tile overwrites buf.
__syncthreads();
}
}
}
// Launches the nearest-neighbor kernel in both directions (xyz -> xyz2 and
// xyz2 -> xyz).  All pointers are device pointers; result buffers must hold
// b*n (resp. b*m) entries.  Grid (32,16,1) x 512 threads works for any b/n/m
// because the kernel uses grid-stride loops.  Errors are only printed, not
// propagated to the caller.
void ChamferDistanceKernelLauncher(
const int b, const int n,
const float* xyz,
const int m,
const float* xyz2,
float* result,
int* result_i,
float* result2,
int* result2_i)
{
hipLaunchKernelGGL(( ChamferDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b, n, xyz, m, xyz2, result, result_i);
hipLaunchKernelGGL(( ChamferDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b, m, xyz2, n, xyz, result2, result2_i);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("error in chamfer distance updateOutput: %s\n", hipGetErrorString(err));
}
// Backward kernel for one direction of the chamfer distance.
// For each point j of xyz1, with nearest neighbor j2 = idx1[i*n+j] in xyz2,
// d(dist)/d(xyz1[j]) = 2*(p1 - p2) and d(dist)/d(xyz2[j2]) = -2*(p1 - p2),
// each scaled by the incoming gradient grad_dist1.  atomicAdd is required
// because several xyz1 points can share the same nearest neighbor j2 (and the
// launcher accumulates both directions into the same buffers).
__global__
void ChamferDistanceGradKernel(
int b, int n,
const float* xyz1,
int m,
const float* xyz2,
const float* grad_dist1,
const int* idx1,
float* grad_xyz1,
float* grad_xyz2)
{
for (int i = blockIdx.x; i<b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x*gridDim.y) {
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;  // chain rule: d/dp ||p-q||^2 = 2(p-q)
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// Zeroes the gradient buffers, then accumulates gradients for both directions
// of the chamfer distance (xyz1 -> xyz2 and xyz2 -> xyz1) via atomic adds.
// All pointers are device pointers; gradient buffers are float arrays of
// b*n*3 (resp. b*m*3) elements.  Errors are printed, not propagated.
void ChamferDistanceGradKernelLauncher(
    const int b, const int n,
    const float* xyz1,
    const int m,
    const float* xyz2,
    const float* grad_dist1,
    const int* idx1,
    const float* grad_dist2,
    const int* idx2,
    float* grad_xyz1,
    float* grad_xyz2)
{
    // Byte counts were written as "b*n*3*4": spell out sizeof(float) instead of
    // the magic 4, and widen to size_t before multiplying to avoid int overflow
    // on very large clouds.
    hipMemset(grad_xyz1, 0, (size_t)b * n * 3 * sizeof(float));
    hipMemset(grad_xyz2, 0, (size_t)b * m * 3 * sizeof(float));
    hipLaunchKernelGGL(( ChamferDistanceGradKernel), dim3(dim3(1,16,1)), dim3(256), 0, 0, b, n, xyz1, m, xyz2, grad_dist1, idx1, grad_xyz1, grad_xyz2);
    hipLaunchKernelGGL(( ChamferDistanceGradKernel), dim3(dim3(1,16,1)), dim3(256), 0, 0, b, m, xyz2, n, xyz1, grad_dist2, idx2, grad_xyz2, grad_xyz1);
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        printf("error in chamfer distance get grad: %s\n", hipGetErrorString(err));
}
#include <cuda.h>
#include <cuda_runtime.h>
// Nearest-neighbor search for chamfer distance.  For every point of xyz
// (layout (b, n, 3), row-major floats) finds the closest point of xyz2
// ((b, m, 3)) by squared L2 distance, writing the distance to result[b*n]
// and the argmin index to result_i[b*n].
// Strategy: tiles xyz2 through shared memory 512 points at a time; each
// thread scans the tile with a manual 4-way unroll.  Grid: blockIdx.x strides
// batches, (blockIdx.y, threadIdx.x) stride the n query points.
__global__
void ChamferDistanceKernel(
int b,
int n,
const float* xyz,
int m,
const float* xyz2,
float* result,
int* result_i)
{
const int batch=512;  // tile size: points of xyz2 staged per iteration
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;  // points in this (possibly partial) tile
// Cooperative, coalesced copy of the tile into shared memory.
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);  // largest multiple of 4 <= end_k
if (end_ka==batch){
// Full tile: fixed trip count lets the compiler unroll aggressively.
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
// Partial tile: same unrolled body, variable trip count.
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
// Remainder loop for the 0-3 points the unroll did not cover.
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
// First tile initializes the running minimum; later tiles only improve it.
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
// Barrier before the next tile overwrites buf.
__syncthreads();
}
}
}
// Launches the nearest-neighbor kernel in both directions (xyz -> xyz2 and
// xyz2 -> xyz).  All pointers are device pointers; result buffers must hold
// b*n (resp. b*m) entries.  Grid (32,16,1) x 512 threads works for any b/n/m
// because the kernel uses grid-stride loops.  Errors are only printed, not
// propagated to the caller.
void ChamferDistanceKernelLauncher(
const int b, const int n,
const float* xyz,
const int m,
const float* xyz2,
float* result,
int* result_i,
float* result2,
int* result2_i)
{
ChamferDistanceKernel<<<dim3(32,16,1),512>>>(b, n, xyz, m, xyz2, result, result_i);
ChamferDistanceKernel<<<dim3(32,16,1),512>>>(b, m, xyz2, n, xyz, result2, result2_i);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("error in chamfer distance updateOutput: %s\n", cudaGetErrorString(err));
}
// Backward kernel for one direction of the chamfer distance.
// For each point j of xyz1, with nearest neighbor j2 = idx1[i*n+j] in xyz2,
// d(dist)/d(xyz1[j]) = 2*(p1 - p2) and d(dist)/d(xyz2[j2]) = -2*(p1 - p2),
// each scaled by the incoming gradient grad_dist1.  atomicAdd is required
// because several xyz1 points can share the same nearest neighbor j2 (and the
// launcher accumulates both directions into the same buffers).
__global__
void ChamferDistanceGradKernel(
int b, int n,
const float* xyz1,
int m,
const float* xyz2,
const float* grad_dist1,
const int* idx1,
float* grad_xyz1,
float* grad_xyz2)
{
for (int i = blockIdx.x; i<b; i += gridDim.x) {
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x*gridDim.y) {
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;  // chain rule: d/dp ||p-q||^2 = 2(p-q)
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// Zeroes the gradient buffers, then accumulates gradients for both directions
// of the chamfer distance (xyz1 -> xyz2 and xyz2 -> xyz1) via atomic adds.
// All pointers are device pointers; gradient buffers are float arrays of
// b*n*3 (resp. b*m*3) elements.  Errors are printed, not propagated.
void ChamferDistanceGradKernelLauncher(
    const int b, const int n,
    const float* xyz1,
    const int m,
    const float* xyz2,
    const float* grad_dist1,
    const int* idx1,
    const float* grad_dist2,
    const int* idx2,
    float* grad_xyz1,
    float* grad_xyz2)
{
    // Byte counts were written as "b*n*3*4": spell out sizeof(float) instead of
    // the magic 4, and widen to size_t before multiplying to avoid int overflow
    // on very large clouds.
    cudaMemset(grad_xyz1, 0, (size_t)b * n * 3 * sizeof(float));
    cudaMemset(grad_xyz2, 0, (size_t)b * m * 3 * sizeof(float));
    ChamferDistanceGradKernel<<<dim3(1,16,1), 256>>>(b, n, xyz1, m, xyz2, grad_dist1, idx1, grad_xyz1, grad_xyz2);
    ChamferDistanceGradKernel<<<dim3(1,16,1), 256>>>(b, m, xyz2, n, xyz1, grad_dist2, idx2, grad_xyz2, grad_xyz1);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("error in chamfer distance get grad: %s\n", cudaGetErrorString(err));
}
fcefda0dcbfb96f5fd42d04852ed3cf52c13360e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
#ifdef OPENCV
#include "opencv2/highgui/highgui_c.h"
#endif
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
// Runs the forward pass on the GPU: for each layer, zeroes its delta buffer
// (if allocated), invokes its forward_gpu hook, and feeds its output_gpu to
// the next layer via state.input.  Optionally synchronizes the CUDA stream
// after every layer when net.wait_stream is set.
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
// Gradients accumulate during backward; start each pass from zero.
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
if(net.wait_stream)
hipStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
/*
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
int j;
for (j = 0; j < l.out_c; ++j) {
image img = make_image(l.out_w, l.out_h, 3);
memcpy(img.data, l.output+ l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
char buff[256];
sprintf(buff, "layer-%d slice-%d", i, j);
show_image(img, buff);
}
cvWaitKey(0); // wait press-key in console
cvDestroyAllWindows();
}
*/
}
}
// Runs the backward pass on the GPU, iterating layers last-to-first.  Each
// layer sees the previous layer's output as its input and writes gradients
// into the previous layer's delta_gpu; layer 0 receives the original network
// input/delta.  A layer with stopbackward set truncates the pass.
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if (l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
// Applies one optimizer step on the GPU: calls each layer's update_gpu hook
// with the effective batch size (batch * subdivisions) and the current
// schedule-adjusted learning rate.
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);  // step counter used by momentum/Adam-style updates
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
// Uploads one batch (x = inputs, y = truths) to lazily-allocated persistent
// device buffers, then runs forward and backward passes in training mode.
// Under CUDNN_HALF, also refreshes each layer's fp16 weight copy first.
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
// Detection-style layers carry a larger truth tensor than the raw output.
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
// First call: allocate device-side input/truth buffers once and reuse.
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
#ifdef CUDNN_HALF
int i;
for (i = 0; i < net.n; ++i) {
layer l = net.layers[i];
cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
}
#endif
forward_network_gpu(net, state);
//hipStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
}
// Trains on one mini-batch: forward+backward, returns the network cost, and
// applies the weight update once all subdivisions of the batch are processed.
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
// Argument bundle handed to a training worker thread.
typedef struct {
network net;   // network to train (thread selects its GPU)
data d;        // data shard for this worker
float *err;    // out-parameter: training loss written by the thread
} train_args;
// Worker entry point: takes ownership of (and frees) the heap-allocated
// train_args, binds the thread to the network's GPU, and trains.
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
// Spawns a worker thread training `net` on `d`; the loss lands in *err after
// pthread_join.  The args struct is heap-allocated because the thread outlives
// this stack frame; the worker frees it.
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
// Copies a layer's accumulated weight/bias (and scale, if present) update
// buffers from device to host.  Only convolutional and connected layers carry
// trainable updates; other layer types are a no-op.
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
// Mirror of pull_updates: copies the host-side update buffers back to the
// device after multi-GPU merging.
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
// Applies the optimizer step to a single layer (same math as
// update_network_gpu, but for one layer).
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
// Accumulates (host-side) the weights of `l` into `base`; used by multi-GPU
// sync to sum per-GPU replicas before averaging.
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
// Scales a layer's host-side weights by s; with s = 1/n this turns the summed
// replicas from merge_weights into their average.
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
// Device -> host copy of a layer's weights/biases (and scales if present).
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
// Host -> device copy of a layer's own weights/biases.
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
// Pushes the host-side weights of `base` into the device buffers of `l`;
// used to broadcast the averaged weights to every GPU replica.
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
// Accumulates (host-side) a layer's update buffers into `base`; counterpart
// of merge_weights for the gradient-update path.
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
// Pushes `base`'s host-side update buffers into `l`'s device buffers.
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
// Averages layer j's weights across n GPU replicas: pull each replica's
// weights to the host, sum them into nets[0]'s copy, scale by 1/n, then push
// the average back to every replica.  Uses nets[0]'s host buffers as the
// accumulator.
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);  // sum -> mean
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
// Argument bundle for the per-layer sync worker threads.
typedef struct{
network *nets;  // array of per-GPU network replicas
int n;          // number of replicas
int j;          // layer index to synchronize
} sync_args;
// Worker entry point: syncs one layer, then frees its heap-allocated args.
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
// Spawns a thread that synchronizes layer j across the replicas; caller joins.
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
// Synchronizes all layers of n GPU replicas in parallel (one thread per
// layer) and advances the shared `seen` counter by the images the other
// replicas processed since the last sync.
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
// Credit the images processed on the other n-1 GPUs during `interval` batches.
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
// Multi-GPU training step: splits `d` into n shards, trains each replica on
// its shard in a worker thread, and every `interval` batches averages the
// replicas' weights with sync_nets.  Returns the mean loss across replicas.
// Precondition (asserted): d.X.rows equals batch*subdivisions*n.
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);  // shard i of n
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
// Copies layer i's output from device to its host buffer and returns the
// host pointer.  REGION layers are skipped because their forward pass already
// maintains the host-side output.
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
// Returns the host-side output of the last non-COST layer (the network's
// actual prediction, skipping trailing loss layers).
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
// Runs inference on one host-side input batch and returns a pointer to the
// network's host-side output buffer (owned by the output layer; do not free).
// Allocates a temporary device copy of the input and frees it before return.
float *network_predict_gpu(network net, float *input)
{
    if (net.gpu_index != cuda_get_device())
        cuda_set_device(net.gpu_index);
    int size = get_network_input_size(net) * net.batch;
    // (removed leftover debug printf of `size` that polluted stdout on every call)
    network_state state;
    state.index = 0;
    state.net = net;
    state.input = cuda_make_array(input, size);  // H2D copy of the batch
    state.truth = 0;
    state.train = 0;   // inference mode: no truths, no deltas
    state.delta = 0;
    forward_network_gpu(net, state);
    float *out = get_network_output_gpu(net);    // pulls the result to the host
    cuda_free(state.input);
    return out;
}
| fcefda0dcbfb96f5fd42d04852ed3cf52c13360e.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
#ifdef OPENCV
#include "opencv2/highgui/highgui_c.h"
#endif
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
if(net.wait_stream)
cudaStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
/*
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
int j;
for (j = 0; j < l.out_c; ++j) {
image img = make_image(l.out_w, l.out_h, 3);
memcpy(img.data, l.output+ l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
char buff[256];
sprintf(buff, "layer-%d slice-%d", i, j);
show_image(img, buff);
}
cvWaitKey(0); // wait press-key in console
cvDestroyAllWindows();
}
*/
}
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if (l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
#ifdef CUDNN_HALF
int i;
for (i = 0; i < net.n; ++i) {
layer l = net.layers[i];
cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
}
#endif
forward_network_gpu(net, state);
//cudaStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
if (net.gpu_index != cuda_get_device())
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
printf("size = %d.\n",size);
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
cuda_free(state.input);
return out;
}
|
f3613ff7c129b3540e43847e36b9fb311570e644.hip | // !!! This is a file automatically generated by hipify!!!
// n should be less than 10000 when k==3
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void parallel_max_each_chunk(float *dmaxarr, int *dmaxstart, int *dmaxend,float * darr, int n, int k);
void check(int n, int numBlock, float *smaxarr, float *maxarr, float *arr);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)i;
}
const int numthreadsBlock = 8;
int numChunk;
numChunk = ( n + numthreadsBlock - 1)/numthreadsBlock;
float *maxarr = (float *)malloc(numChunk * sizeof(float));
int *maxstart = (int *)malloc(numChunk * sizeof(int));
int *maxend = (int *)malloc(numChunk * sizeof(int));
int numBlock = numChunk;
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
hipMalloc((void **)&darr, n*sizeof(float));
hipMalloc((void **)&dmaxarr, numChunk*sizeof(float));
hipMalloc((void **)&dmaxstart, numChunk*sizeof(int));
hipMalloc((void **)&dmaxend, numChunk*sizeof(int));
hipMemcpy(darr, arr, n*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
hipLaunchKernelGGL(( parallel_max_each_chunk), dim3(dimGrid),dim3(dimBlock),(n+3*numthreadsBlock)*sizeof(float), 0, dmaxarr,dmaxstart,dmaxend, darr, n, k);
hipDeviceSynchronize();
hipMemcpy(maxarr, dmaxarr, numChunk*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(maxstart, dmaxstart, numChunk*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(maxend, dmaxend, numChunk*sizeof(int), hipMemcpyDeviceToHost);
//truth
float *smaxarr = (float *)malloc(numChunk*sizeof(float));
for (i = 0; i < numChunk; i ++) {
smaxarr[i] = i*numthreadsBlock + k <=n? arr[i*numthreadsBlock + k/2 ]:0; // k is an odd number
}
// compare the truth with results by kernel
check(n,numBlock, smaxarr, maxarr,arr);
printf("max start %d, %d\n", maxstart[0], maxend[0]);
// check the exit state of CUDA code
hipError_t error = hipGetLastError();
if (error !=hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(error));
}
//free gpu memory
hipFree(dmaxarr);
hipFree(darr);
return 0;
}
__global__ void parallel_max_each_chunk(float *dmaxarr,int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
int i, tid = threadIdx.x;
//copy the whole series to shared memory
//always round up and if n is a multiple of blockDim.x no rounding
int chunkSize = (n+blockDim.x-1)/blockDim.x;
extern __shared__ float sdata[];
for (i = 0; i < chunkSize; i++) {
if (tid * chunkSize + i <n)
sdata[tid*chunkSize + i ] = darr[tid*chunkSize + i];
}
__syncthreads();
// declare three arrays for the maximum found by each thread
extern __shared__ float mymaxvals[];
extern __shared__ int mystartmaxes[];
extern __shared__ int myendmaxes[];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporay variable used when computing mean of subsequence
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += sdata[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
// mystartmaxes[tid] = perstart;
// myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + sdata[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next longer subsequence has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// get the highest among the mymaxvals using reduce
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
// the maximum among the mymaxvals in this block
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
//check the results
void check(int n, int numBlock, float *smaxarr, float *maxarr, float *arr) {
bool judge = true;
for (int i=0; i < numBlock; i++) {
printf("max of block %d, %f %f\n ", i, smaxarr[i], maxarr[i]);
judge = judge && (smaxarr[i] == maxarr[i]);
}
printf("\n--------correct or wrong---------\n");
printf(judge ? "right\n": "wrong\n");
printf("\n--------1d array---------\n");
if ( n < 15) {
for (int i=0; i < n; i++) {
printf("element %d, %f\n ", i, arr[i]);
}
}
}
| f3613ff7c129b3540e43847e36b9fb311570e644.cu | // n should be less than 10000 when k==3
#include <stdio.h>
#include <cuda.h>
__global__ void parallel_max_each_chunk(float *dmaxarr, int *dmaxstart, int *dmaxend,float * darr, int n, int k);
void check(int n, int numBlock, float *smaxarr, float *maxarr, float *arr);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)i;
}
const int numthreadsBlock = 8;
int numChunk;
numChunk = ( n + numthreadsBlock - 1)/numthreadsBlock;
float *maxarr = (float *)malloc(numChunk * sizeof(float));
int *maxstart = (int *)malloc(numChunk * sizeof(int));
int *maxend = (int *)malloc(numChunk * sizeof(int));
int numBlock = numChunk;
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
cudaMalloc((void **)&darr, n*sizeof(float));
cudaMalloc((void **)&dmaxarr, numChunk*sizeof(float));
cudaMalloc((void **)&dmaxstart, numChunk*sizeof(int));
cudaMalloc((void **)&dmaxend, numChunk*sizeof(int));
cudaMemcpy(darr, arr, n*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
parallel_max_each_chunk<<<dimGrid,dimBlock,(n+3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
cudaThreadSynchronize();
cudaMemcpy(maxarr, dmaxarr, numChunk*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(maxstart, dmaxstart, numChunk*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(maxend, dmaxend, numChunk*sizeof(int), cudaMemcpyDeviceToHost);
//truth
float *smaxarr = (float *)malloc(numChunk*sizeof(float));
for (i = 0; i < numChunk; i ++) {
smaxarr[i] = i*numthreadsBlock + k <=n? arr[i*numthreadsBlock + k/2 ]:0; // k is an odd number
}
// compare the truth with results by kernel
check(n,numBlock, smaxarr, maxarr,arr);
printf("max start %d, %d\n", maxstart[0], maxend[0]);
// check the exit state of CUDA code
cudaError_t error = cudaGetLastError();
if (error !=cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
}
//free gpu memory
cudaFree(dmaxarr);
cudaFree(darr);
return 0;
}
__global__ void parallel_max_each_chunk(float *dmaxarr,int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
int i, tid = threadIdx.x;
//copy the whole series to shared memory
//always round up and if n is a multiple of blockDim.x no rounding
int chunkSize = (n+blockDim.x-1)/blockDim.x;
extern __shared__ float sdata[];
for (i = 0; i < chunkSize; i++) {
if (tid * chunkSize + i <n)
sdata[tid*chunkSize + i ] = darr[tid*chunkSize + i];
}
__syncthreads();
// declare three arrays for the maximum found by each thread
extern __shared__ float mymaxvals[];
extern __shared__ int mystartmaxes[];
extern __shared__ int myendmaxes[];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporay variable used when computing mean of subsequence
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += sdata[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
// mystartmaxes[tid] = perstart;
// myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + sdata[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next longer subsequence has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// get the highest among the mymaxvals using reduce
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
// the maximum among the mymaxvals in this block
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
//check the results
void check(int n, int numBlock, float *smaxarr, float *maxarr, float *arr) {
bool judge = true;
for (int i=0; i < numBlock; i++) {
printf("max of block %d, %f %f\n ", i, smaxarr[i], maxarr[i]);
judge = judge && (smaxarr[i] == maxarr[i]);
}
printf("\n--------correct or wrong---------\n");
printf(judge ? "right\n": "wrong\n");
printf("\n--------1d array---------\n");
if ( n < 15) {
for (int i=0; i < n; i++) {
printf("element %d, %f\n ", i, arr[i]);
}
}
}
|
de2475b318a53f35981c6dc921583da88eeac910.hip | // !!! This is a file automatically generated by hipify!!!
//Author: Alexander G. Schwing (http://alexander-schwing.de)
//Author: Liang-Chieh (Jay) Chen (http://www.cs.ucla.edu/~lcchen/)
#ifdef _MSC_VER
#pragma warning( disable : 4661 )
#endif
#include "Function_Softmax.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "../LSDN_CudaCommon.h"
#include "LSDN_mathfunctions.h"
template <typename T>
__global__ void kernel_get_max(const int num, const int dim, const T* data, T* out) {
CUDA_KERNEL_LOOP(index, num) {
T maxval = data[index*dim];
for (int i = 1; i < dim; ++i) {
maxval = max(data[index * dim + i], maxval);
}
out[index] = maxval;
}
}
template <class N>
void SoftmaxFunction<N>::GetMax(i2t<true>) {
hipLaunchKernelGGL(( kernel_get_max<ValueType>), dim3(LSDN_GET_BLOCKS(numEl_AllButOne)), dim3(LSDN_CUDA_NUM_THREADS), 0, 0, int(numEl_AllButOne), int(NodeType::sz[0]), NodeType::value, scale_val);
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_ElementwiseMultiply(const int num, const T* data_in1, const T* data_in2, T* data_out) {
CUDA_KERNEL_LOOP(index, num) {
data_out[index] = data_in1[index] * data_in2[index];
}
}
template <typename T>
__global__ void kernel_ElementwiseMultiplyAdd(const int num, const T* data_in1, const T* data_in2, T* data_out) {
CUDA_KERNEL_LOOP(index, num) {
data_out[index] = data_in1[index] * data_in2[index];
}
}
template <class N>
void SoftmaxFunction<N>::ElementwiseMultiply(i2t<true>, SizeType dim, ValueType* data_in1, ValueType* data_in2, ValueType* data_out, bool addToOutput) {
if (addToOutput) {
hipLaunchKernelGGL(( kernel_ElementwiseMultiplyAdd<ValueType>), dim3(LSDN_GET_BLOCKS(numEl_AllButOne)), dim3(LSDN_CUDA_NUM_THREADS), 0, 0, int(dim), data_in1, data_in2, data_out);
} else {
hipLaunchKernelGGL(( kernel_ElementwiseMultiply<ValueType>), dim3(LSDN_GET_BLOCKS(numEl_AllButOne)), dim3(LSDN_CUDA_NUM_THREADS), 0, 0, int(dim), data_in1, data_in2, data_out);
}
check_cuda_errors(__FILE__, __LINE__);
}
template class SoftmaxFunction<Node<double, int, false> >;
template class SoftmaxFunction<Node<double, int, true> >;
template class SoftmaxFunction<Node<float, int, false> >;
template class SoftmaxFunction<Node<float, int, true> >; | de2475b318a53f35981c6dc921583da88eeac910.cu | //Author: Alexander G. Schwing (http://alexander-schwing.de)
//Author: Liang-Chieh (Jay) Chen (http://www.cs.ucla.edu/~lcchen/)
#ifdef _MSC_VER
#pragma warning( disable : 4661 )
#endif
#include "Function_Softmax.h"
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include "../LSDN_CudaCommon.h"
#include "LSDN_mathfunctions.h"
template <typename T>
__global__ void kernel_get_max(const int num, const int dim, const T* data, T* out) {
CUDA_KERNEL_LOOP(index, num) {
T maxval = data[index*dim];
for (int i = 1; i < dim; ++i) {
maxval = max(data[index * dim + i], maxval);
}
out[index] = maxval;
}
}
template <class N>
void SoftmaxFunction<N>::GetMax(i2t<true>) {
kernel_get_max<ValueType><<<LSDN_GET_BLOCKS(numEl_AllButOne), LSDN_CUDA_NUM_THREADS>>>(int(numEl_AllButOne), int(NodeType::sz[0]), NodeType::value, scale_val);
check_cuda_errors(__FILE__, __LINE__);
}
template <typename T>
__global__ void kernel_ElementwiseMultiply(const int num, const T* data_in1, const T* data_in2, T* data_out) {
CUDA_KERNEL_LOOP(index, num) {
data_out[index] = data_in1[index] * data_in2[index];
}
}
template <typename T>
__global__ void kernel_ElementwiseMultiplyAdd(const int num, const T* data_in1, const T* data_in2, T* data_out) {
CUDA_KERNEL_LOOP(index, num) {
data_out[index] = data_in1[index] * data_in2[index];
}
}
template <class N>
void SoftmaxFunction<N>::ElementwiseMultiply(i2t<true>, SizeType dim, ValueType* data_in1, ValueType* data_in2, ValueType* data_out, bool addToOutput) {
if (addToOutput) {
kernel_ElementwiseMultiplyAdd<ValueType><<<LSDN_GET_BLOCKS(numEl_AllButOne), LSDN_CUDA_NUM_THREADS>>>(int(dim), data_in1, data_in2, data_out);
} else {
kernel_ElementwiseMultiply<ValueType><<<LSDN_GET_BLOCKS(numEl_AllButOne), LSDN_CUDA_NUM_THREADS>>>(int(dim), data_in1, data_in2, data_out);
}
check_cuda_errors(__FILE__, __LINE__);
}
template class SoftmaxFunction<Node<double, int, false> >;
template class SoftmaxFunction<Node<double, int, true> >;
template class SoftmaxFunction<Node<float, int, false> >;
template class SoftmaxFunction<Node<float, int, true> >; |
bf2487114b7488a83a428f48cc72c02f23cf1c89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/solver/bicgstab_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The BICGSTAB solver namespace.
*
* @ingroup bicgstab
*/
namespace bicgstab {
constexpr int default_block_size = 512;
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void initialize_kernel(
size_type num_rows, size_type num_cols, size_type stride,
const ValueType *__restrict__ b, ValueType *__restrict__ r,
ValueType *__restrict__ rr, ValueType *__restrict__ y,
ValueType *__restrict__ s, ValueType *__restrict__ t,
ValueType *__restrict__ z, ValueType *__restrict__ v,
ValueType *__restrict__ p, ValueType *__restrict__ prev_rho,
ValueType *__restrict__ rho, ValueType *__restrict__ alpha,
ValueType *__restrict__ beta, ValueType *__restrict__ gamma,
ValueType *__restrict__ omega, stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
if (tidx < num_cols) {
prev_rho[tidx] = one<ValueType>();
rho[tidx] = one<ValueType>();
alpha[tidx] = one<ValueType>();
beta[tidx] = one<ValueType>();
gamma[tidx] = one<ValueType>();
omega[tidx] = one<ValueType>();
stop_status[tidx].reset();
}
if (tidx < num_rows * stride) {
r[tidx] = b[tidx];
rr[tidx] = zero<ValueType>();
y[tidx] = zero<ValueType>();
s[tidx] = zero<ValueType>();
t[tidx] = zero<ValueType>();
z[tidx] = zero<ValueType>();
v[tidx] = zero<ValueType>();
p[tidx] = zero<ValueType>();
}
}
template <typename ValueType>
void initialize(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *r,
matrix::Dense<ValueType> *rr, matrix::Dense<ValueType> *y,
matrix::Dense<ValueType> *s, matrix::Dense<ValueType> *t,
matrix::Dense<ValueType> *z, matrix::Dense<ValueType> *v,
matrix::Dense<ValueType> *p, matrix::Dense<ValueType> *prev_rho,
matrix::Dense<ValueType> *rho, matrix::Dense<ValueType> *alpha,
matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *gamma,
matrix::Dense<ValueType> *omega,
Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(b->get_size()[0] * b->get_stride(), block_size.x), 1, 1);
hipLaunchKernelGGL(( initialize_kernel), dim3(grid_size), dim3(block_size), 0, 0,
b->get_size()[0], b->get_size()[1], b->get_stride(),
as_cuda_type(b->get_const_values()), as_cuda_type(r->get_values()),
as_cuda_type(rr->get_values()), as_cuda_type(y->get_values()),
as_cuda_type(s->get_values()), as_cuda_type(t->get_values()),
as_cuda_type(z->get_values()), as_cuda_type(v->get_values()),
as_cuda_type(p->get_values()), as_cuda_type(prev_rho->get_values()),
as_cuda_type(rho->get_values()), as_cuda_type(alpha->get_values()),
as_cuda_type(beta->get_values()), as_cuda_type(gamma->get_values()),
as_cuda_type(omega->get_values()),
as_cuda_type(stop_status->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_INITIALIZE_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void step_1_kernel(
size_type num_rows, size_type num_cols, size_type stride,
const ValueType *__restrict__ r, ValueType *__restrict__ p,
const ValueType *__restrict__ v, const ValueType *__restrict__ rho,
const ValueType *__restrict__ prev_rho, const ValueType *__restrict__ alpha,
const ValueType *__restrict__ omega,
const stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].has_stopped()) {
return;
}
auto res = r[tidx];
if (prev_rho[col] * omega[col] != zero<ValueType>()) {
const auto tmp = (rho[col] / prev_rho[col]) * (alpha[col] / omega[col]);
res += tmp * (p[tidx] - omega[col] * v[tidx]);
}
p[tidx] = res;
}
template <typename ValueType>
void step_1(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *r, matrix::Dense<ValueType> *p,
const matrix::Dense<ValueType> *v,
const matrix::Dense<ValueType> *rho,
const matrix::Dense<ValueType> *prev_rho,
const matrix::Dense<ValueType> *alpha,
const matrix::Dense<ValueType> *omega,
const Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(r->get_size()[0] * r->get_stride(), block_size.x), 1, 1);
hipLaunchKernelGGL(( step_1_kernel), dim3(grid_size), dim3(block_size), 0, 0,
r->get_size()[0], r->get_size()[1], r->get_stride(),
as_cuda_type(r->get_const_values()), as_cuda_type(p->get_values()),
as_cuda_type(v->get_const_values()),
as_cuda_type(rho->get_const_values()),
as_cuda_type(prev_rho->get_const_values()),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(omega->get_const_values()),
as_cuda_type(stop_status->get_const_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_STEP_1_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void step_2_kernel(
size_type num_rows, size_type num_cols, size_type stride,
const ValueType *__restrict__ r, ValueType *__restrict__ s,
const ValueType *__restrict__ v, const ValueType *__restrict__ rho,
ValueType *__restrict__ alpha, const ValueType *__restrict__ beta,
const stopping_status *__restrict__ stop_status)
{
const size_type tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const size_type col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].has_stopped()) {
return;
}
auto t_alpha = zero<ValueType>();
auto t_s = r[tidx];
if (beta[col] != zero<ValueType>()) {
t_alpha = rho[col] / beta[col];
t_s -= t_alpha * v[tidx];
}
alpha[col] = t_alpha;
s[tidx] = t_s;
}
template <typename ValueType>
void step_2(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *r, matrix::Dense<ValueType> *s,
const matrix::Dense<ValueType> *v,
const matrix::Dense<ValueType> *rho,
matrix::Dense<ValueType> *alpha,
const matrix::Dense<ValueType> *beta,
const Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(r->get_size()[0] * r->get_stride(), block_size.x), 1, 1);
hipLaunchKernelGGL(( step_2_kernel), dim3(grid_size), dim3(block_size), 0, 0,
r->get_size()[0], r->get_size()[1], r->get_stride(),
as_cuda_type(r->get_const_values()), as_cuda_type(s->get_values()),
as_cuda_type(v->get_const_values()),
as_cuda_type(rho->get_const_values()),
as_cuda_type(alpha->get_values()),
as_cuda_type(beta->get_const_values()),
as_cuda_type(stop_status->get_const_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_STEP_2_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void step_3_kernel(
size_type num_rows, size_type num_cols, size_type stride,
size_type x_stride, ValueType *__restrict__ x, ValueType *__restrict__ r,
const ValueType *__restrict__ s, const ValueType *__restrict__ t,
const ValueType *__restrict__ y, const ValueType *__restrict__ z,
const ValueType *__restrict__ alpha, const ValueType *__restrict__ beta,
const ValueType *__restrict__ gamma, ValueType *__restrict__ omega,
const stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto row = tidx / stride;
const auto col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].has_stopped()) {
return;
}
const auto x_pos = row * x_stride + col;
auto t_omega = zero<ValueType>();
auto t_x = x[x_pos] + alpha[col] * y[tidx];
auto t_r = s[tidx];
if (beta[col] != zero<ValueType>()) {
t_omega = gamma[col] / beta[col];
t_x += t_omega * z[tidx];
t_r -= t_omega * t[tidx];
}
omega[col] = t_omega;
x[x_pos] = t_x;
r[tidx] = t_r;
}
template <typename ValueType>
void step_3(
std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *x,
matrix::Dense<ValueType> *r, const matrix::Dense<ValueType> *s,
const matrix::Dense<ValueType> *t, const matrix::Dense<ValueType> *y,
const matrix::Dense<ValueType> *z, const matrix::Dense<ValueType> *alpha,
const matrix::Dense<ValueType> *beta, const matrix::Dense<ValueType> *gamma,
matrix::Dense<ValueType> *omega, const Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(r->get_size()[0] * r->get_stride(), block_size.x), 1, 1);
hipLaunchKernelGGL(( step_3_kernel), dim3(grid_size), dim3(block_size), 0, 0,
r->get_size()[0], r->get_size()[1], r->get_stride(), x->get_stride(),
as_cuda_type(x->get_values()), as_cuda_type(r->get_values()),
as_cuda_type(s->get_const_values()),
as_cuda_type(t->get_const_values()),
as_cuda_type(y->get_const_values()),
as_cuda_type(z->get_const_values()),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(beta->get_const_values()),
as_cuda_type(gamma->get_const_values()),
as_cuda_type(omega->get_values()),
as_cuda_type(stop_status->get_const_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_STEP_3_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void finalize_kernel(
size_type num_rows, size_type num_cols, size_type stride,
size_type x_stride, ValueType *__restrict__ x,
const ValueType *__restrict__ y, const ValueType *__restrict__ alpha,
stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto row = tidx / stride;
const auto col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].is_finalized() || !stop_status[col].has_stopped()) {
return;
}
const auto x_pos = row * x_stride + col;
x[x_pos] = x[x_pos] + alpha[col] * y[tidx];
stop_status[col].finalize();
}
template <typename ValueType>
void finalize(std::shared_ptr<const CudaExecutor> exec,
matrix::Dense<ValueType> *x, const matrix::Dense<ValueType> *y,
const matrix::Dense<ValueType> *alpha,
Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(y->get_size()[0] * y->get_stride(), block_size.x), 1, 1);
hipLaunchKernelGGL(( finalize_kernel), dim3(grid_size), dim3(block_size), 0, 0,
y->get_size()[0], y->get_size()[1], y->get_stride(), x->get_stride(),
as_cuda_type(x->get_values()), as_cuda_type(y->get_const_values()),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(stop_status->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_FINALIZE_KERNEL);
} // namespace bicgstab
} // namespace cuda
} // namespace kernels
} // namespace gko
| bf2487114b7488a83a428f48cc72c02f23cf1c89.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/solver/bicgstab_kernels.hpp"
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The BICGSTAB solver namespace.
*
* @ingroup bicgstab
*/
namespace bicgstab {
constexpr int default_block_size = 512;
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void initialize_kernel(
size_type num_rows, size_type num_cols, size_type stride,
const ValueType *__restrict__ b, ValueType *__restrict__ r,
ValueType *__restrict__ rr, ValueType *__restrict__ y,
ValueType *__restrict__ s, ValueType *__restrict__ t,
ValueType *__restrict__ z, ValueType *__restrict__ v,
ValueType *__restrict__ p, ValueType *__restrict__ prev_rho,
ValueType *__restrict__ rho, ValueType *__restrict__ alpha,
ValueType *__restrict__ beta, ValueType *__restrict__ gamma,
ValueType *__restrict__ omega, stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
if (tidx < num_cols) {
prev_rho[tidx] = one<ValueType>();
rho[tidx] = one<ValueType>();
alpha[tidx] = one<ValueType>();
beta[tidx] = one<ValueType>();
gamma[tidx] = one<ValueType>();
omega[tidx] = one<ValueType>();
stop_status[tidx].reset();
}
if (tidx < num_rows * stride) {
r[tidx] = b[tidx];
rr[tidx] = zero<ValueType>();
y[tidx] = zero<ValueType>();
s[tidx] = zero<ValueType>();
t[tidx] = zero<ValueType>();
z[tidx] = zero<ValueType>();
v[tidx] = zero<ValueType>();
p[tidx] = zero<ValueType>();
}
}
template <typename ValueType>
void initialize(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *r,
matrix::Dense<ValueType> *rr, matrix::Dense<ValueType> *y,
matrix::Dense<ValueType> *s, matrix::Dense<ValueType> *t,
matrix::Dense<ValueType> *z, matrix::Dense<ValueType> *v,
matrix::Dense<ValueType> *p, matrix::Dense<ValueType> *prev_rho,
matrix::Dense<ValueType> *rho, matrix::Dense<ValueType> *alpha,
matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *gamma,
matrix::Dense<ValueType> *omega,
Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(b->get_size()[0] * b->get_stride(), block_size.x), 1, 1);
initialize_kernel<<<grid_size, block_size, 0, 0>>>(
b->get_size()[0], b->get_size()[1], b->get_stride(),
as_cuda_type(b->get_const_values()), as_cuda_type(r->get_values()),
as_cuda_type(rr->get_values()), as_cuda_type(y->get_values()),
as_cuda_type(s->get_values()), as_cuda_type(t->get_values()),
as_cuda_type(z->get_values()), as_cuda_type(v->get_values()),
as_cuda_type(p->get_values()), as_cuda_type(prev_rho->get_values()),
as_cuda_type(rho->get_values()), as_cuda_type(alpha->get_values()),
as_cuda_type(beta->get_values()), as_cuda_type(gamma->get_values()),
as_cuda_type(omega->get_values()),
as_cuda_type(stop_status->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_INITIALIZE_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void step_1_kernel(
size_type num_rows, size_type num_cols, size_type stride,
const ValueType *__restrict__ r, ValueType *__restrict__ p,
const ValueType *__restrict__ v, const ValueType *__restrict__ rho,
const ValueType *__restrict__ prev_rho, const ValueType *__restrict__ alpha,
const ValueType *__restrict__ omega,
const stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].has_stopped()) {
return;
}
auto res = r[tidx];
if (prev_rho[col] * omega[col] != zero<ValueType>()) {
const auto tmp = (rho[col] / prev_rho[col]) * (alpha[col] / omega[col]);
res += tmp * (p[tidx] - omega[col] * v[tidx]);
}
p[tidx] = res;
}
template <typename ValueType>
void step_1(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *r, matrix::Dense<ValueType> *p,
const matrix::Dense<ValueType> *v,
const matrix::Dense<ValueType> *rho,
const matrix::Dense<ValueType> *prev_rho,
const matrix::Dense<ValueType> *alpha,
const matrix::Dense<ValueType> *omega,
const Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(r->get_size()[0] * r->get_stride(), block_size.x), 1, 1);
step_1_kernel<<<grid_size, block_size, 0, 0>>>(
r->get_size()[0], r->get_size()[1], r->get_stride(),
as_cuda_type(r->get_const_values()), as_cuda_type(p->get_values()),
as_cuda_type(v->get_const_values()),
as_cuda_type(rho->get_const_values()),
as_cuda_type(prev_rho->get_const_values()),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(omega->get_const_values()),
as_cuda_type(stop_status->get_const_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_STEP_1_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void step_2_kernel(
size_type num_rows, size_type num_cols, size_type stride,
const ValueType *__restrict__ r, ValueType *__restrict__ s,
const ValueType *__restrict__ v, const ValueType *__restrict__ rho,
ValueType *__restrict__ alpha, const ValueType *__restrict__ beta,
const stopping_status *__restrict__ stop_status)
{
const size_type tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const size_type col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].has_stopped()) {
return;
}
auto t_alpha = zero<ValueType>();
auto t_s = r[tidx];
if (beta[col] != zero<ValueType>()) {
t_alpha = rho[col] / beta[col];
t_s -= t_alpha * v[tidx];
}
alpha[col] = t_alpha;
s[tidx] = t_s;
}
template <typename ValueType>
void step_2(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *r, matrix::Dense<ValueType> *s,
const matrix::Dense<ValueType> *v,
const matrix::Dense<ValueType> *rho,
matrix::Dense<ValueType> *alpha,
const matrix::Dense<ValueType> *beta,
const Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(r->get_size()[0] * r->get_stride(), block_size.x), 1, 1);
step_2_kernel<<<grid_size, block_size, 0, 0>>>(
r->get_size()[0], r->get_size()[1], r->get_stride(),
as_cuda_type(r->get_const_values()), as_cuda_type(s->get_values()),
as_cuda_type(v->get_const_values()),
as_cuda_type(rho->get_const_values()),
as_cuda_type(alpha->get_values()),
as_cuda_type(beta->get_const_values()),
as_cuda_type(stop_status->get_const_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_STEP_2_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void step_3_kernel(
size_type num_rows, size_type num_cols, size_type stride,
size_type x_stride, ValueType *__restrict__ x, ValueType *__restrict__ r,
const ValueType *__restrict__ s, const ValueType *__restrict__ t,
const ValueType *__restrict__ y, const ValueType *__restrict__ z,
const ValueType *__restrict__ alpha, const ValueType *__restrict__ beta,
const ValueType *__restrict__ gamma, ValueType *__restrict__ omega,
const stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto row = tidx / stride;
const auto col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].has_stopped()) {
return;
}
const auto x_pos = row * x_stride + col;
auto t_omega = zero<ValueType>();
auto t_x = x[x_pos] + alpha[col] * y[tidx];
auto t_r = s[tidx];
if (beta[col] != zero<ValueType>()) {
t_omega = gamma[col] / beta[col];
t_x += t_omega * z[tidx];
t_r -= t_omega * t[tidx];
}
omega[col] = t_omega;
x[x_pos] = t_x;
r[tidx] = t_r;
}
template <typename ValueType>
void step_3(
std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *x,
matrix::Dense<ValueType> *r, const matrix::Dense<ValueType> *s,
const matrix::Dense<ValueType> *t, const matrix::Dense<ValueType> *y,
const matrix::Dense<ValueType> *z, const matrix::Dense<ValueType> *alpha,
const matrix::Dense<ValueType> *beta, const matrix::Dense<ValueType> *gamma,
matrix::Dense<ValueType> *omega, const Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(r->get_size()[0] * r->get_stride(), block_size.x), 1, 1);
step_3_kernel<<<grid_size, block_size, 0, 0>>>(
r->get_size()[0], r->get_size()[1], r->get_stride(), x->get_stride(),
as_cuda_type(x->get_values()), as_cuda_type(r->get_values()),
as_cuda_type(s->get_const_values()),
as_cuda_type(t->get_const_values()),
as_cuda_type(y->get_const_values()),
as_cuda_type(z->get_const_values()),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(beta->get_const_values()),
as_cuda_type(gamma->get_const_values()),
as_cuda_type(omega->get_values()),
as_cuda_type(stop_status->get_const_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_STEP_3_KERNEL);
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void finalize_kernel(
size_type num_rows, size_type num_cols, size_type stride,
size_type x_stride, ValueType *__restrict__ x,
const ValueType *__restrict__ y, const ValueType *__restrict__ alpha,
stopping_status *__restrict__ stop_status)
{
const auto tidx =
static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto row = tidx / stride;
const auto col = tidx % stride;
if (col >= num_cols || tidx >= num_rows * stride ||
stop_status[col].is_finalized() || !stop_status[col].has_stopped()) {
return;
}
const auto x_pos = row * x_stride + col;
x[x_pos] = x[x_pos] + alpha[col] * y[tidx];
stop_status[col].finalize();
}
template <typename ValueType>
void finalize(std::shared_ptr<const CudaExecutor> exec,
matrix::Dense<ValueType> *x, const matrix::Dense<ValueType> *y,
const matrix::Dense<ValueType> *alpha,
Array<stopping_status> *stop_status)
{
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(y->get_size()[0] * y->get_stride(), block_size.x), 1, 1);
finalize_kernel<<<grid_size, block_size, 0, 0>>>(
y->get_size()[0], y->get_size()[1], y->get_stride(), x->get_stride(),
as_cuda_type(x->get_values()), as_cuda_type(y->get_const_values()),
as_cuda_type(alpha->get_const_values()),
as_cuda_type(stop_status->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_BICGSTAB_FINALIZE_KERNEL);
} // namespace bicgstab
} // namespace cuda
} // namespace kernels
} // namespace gko
|
66494427a0f4b2a1415b05fc706a42bc7b038a4b.hip | // !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "SACudaLibrary.h"
#include "SACudaKernel.h"
extern "C" IntArray FindInverseTrends(double * h_data, int h_length)
{
IntArray returnvalue;
int oldlength = h_length;
if(h_length == 0)
{
returnvalue.length = 0;
return returnvalue;
}
//*************************** CUDA ***************************
double * d_data;
int * d_length;
REQUIRE_SUCCESS(hipMalloc((void **)&d_data, sizeof(double) * h_length));
REQUIRE_SUCCESS(hipMalloc((void **)&d_length, sizeof(int)));
REQUIRE_SUCCESS(hipMemcpy((void *)d_data, (void *)h_data, sizeof(double) * h_length, hipMemcpyHostToDevice));
REQUIRE_SUCCESS(hipMemcpy((void *)d_length, (void *)&h_length, sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( FindInverseTrendsKernel), dim3((h_length-1)/1024 + 1),dim3(1024), 0, 0, d_data, d_length);
REQUIRE_SUCCESS(hipMemcpy((void *)&h_length, (void *)d_length, sizeof(int), hipMemcpyDeviceToHost));
if(h_length != 0)
REQUIRE_SUCCESS(hipMemcpy((void *)h_data, (void *)d_data, sizeof(double) * oldlength, hipMemcpyDeviceToHost));
hipFree((void *)d_data);
hipFree((void *)d_length);
//************************* END CUDA *************************
int found = 0;
int i = 0;
returnvalue.values = (int *)malloc(sizeof(int) * h_length);
while(found < h_length)
{
if(h_data[i] > 0)
returnvalue.values[found++] = i+2;
++i;
}
returnvalue.length = h_length;
return returnvalue;
}
extern "C" DoubleArray CalculateMarketAverage(double * h_data, int entries, int timesteps)
{
DoubleArray returnvalue;
if(entries == 0 || timesteps == 0)
{
returnvalue.length = 0;
return returnvalue;
}
//*************************** CUDA ***************************
double * d_data;
REQUIRE_SUCCESS(hipMalloc((void **)&d_data, sizeof(double) * entries * timesteps));
REQUIRE_SUCCESS(hipMemcpy((void *)d_data, (void *)h_data, sizeof(double) * entries * timesteps, hipMemcpyHostToDevice));
for(int i = entries; i > 1; i = ((i-1)/2)+1)
{
hipLaunchKernelGGL(( CalculateMarketAverageKernelReduce), dim3(dim3(((i/2 + i%2)-1)/32 + 1, (timesteps-1/32 + 1))), dim3(dim3(32, 32)), 0, 0, d_data, i, timesteps);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( CalculateMarketAverageKernelFinalize), dim3((entries-1)/512 + 1),dim3(512), 0, 0, d_data, entries, timesteps);
REQUIRE_SUCCESS(hipMemcpy((void *)h_data, (void *)d_data, sizeof(double) * timesteps*entries, hipMemcpyDeviceToHost));
hipFree((void *)d_data);
//************************* END CUDA *************************
returnvalue.values = h_data;
returnvalue.length = timesteps;
return returnvalue;
}
| 66494427a0f4b2a1415b05fc706a42bc7b038a4b.cu | //#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "SACudaLibrary.h"
#include "SACudaKernel.h"
extern "C" IntArray FindInverseTrends(double * h_data, int h_length)
{
IntArray returnvalue;
int oldlength = h_length;
if(h_length == 0)
{
returnvalue.length = 0;
return returnvalue;
}
//*************************** CUDA ***************************
double * d_data;
int * d_length;
REQUIRE_SUCCESS(cudaMalloc((void **)&d_data, sizeof(double) * h_length));
REQUIRE_SUCCESS(cudaMalloc((void **)&d_length, sizeof(int)));
REQUIRE_SUCCESS(cudaMemcpy((void *)d_data, (void *)h_data, sizeof(double) * h_length, cudaMemcpyHostToDevice));
REQUIRE_SUCCESS(cudaMemcpy((void *)d_length, (void *)&h_length, sizeof(int), cudaMemcpyHostToDevice));
FindInverseTrendsKernel<<<(h_length-1)/1024 + 1,1024>>>(d_data, d_length);
REQUIRE_SUCCESS(cudaMemcpy((void *)&h_length, (void *)d_length, sizeof(int), cudaMemcpyDeviceToHost));
if(h_length != 0)
REQUIRE_SUCCESS(cudaMemcpy((void *)h_data, (void *)d_data, sizeof(double) * oldlength, cudaMemcpyDeviceToHost));
cudaFree((void *)d_data);
cudaFree((void *)d_length);
//************************* END CUDA *************************
int found = 0;
int i = 0;
returnvalue.values = (int *)malloc(sizeof(int) * h_length);
while(found < h_length)
{
if(h_data[i] > 0)
returnvalue.values[found++] = i+2;
++i;
}
returnvalue.length = h_length;
return returnvalue;
}
extern "C" DoubleArray CalculateMarketAverage(double * h_data, int entries, int timesteps)
{
DoubleArray returnvalue;
if(entries == 0 || timesteps == 0)
{
returnvalue.length = 0;
return returnvalue;
}
//*************************** CUDA ***************************
double * d_data;
REQUIRE_SUCCESS(cudaMalloc((void **)&d_data, sizeof(double) * entries * timesteps));
REQUIRE_SUCCESS(cudaMemcpy((void *)d_data, (void *)h_data, sizeof(double) * entries * timesteps, cudaMemcpyHostToDevice));
for(int i = entries; i > 1; i = ((i-1)/2)+1)
{
CalculateMarketAverageKernelReduce<<<dim3(((i/2 + i%2)-1)/32 + 1, (timesteps-1/32 + 1)), dim3(32, 32)>>>(d_data, i, timesteps);
cudaDeviceSynchronize();
}
CalculateMarketAverageKernelFinalize<<<(entries-1)/512 + 1,512>>>(d_data, entries, timesteps);
REQUIRE_SUCCESS(cudaMemcpy((void *)h_data, (void *)d_data, sizeof(double) * timesteps*entries, cudaMemcpyDeviceToHost));
cudaFree((void *)d_data);
//************************* END CUDA *************************
returnvalue.values = h_data;
returnvalue.length = timesteps;
return returnvalue;
}
|
dfeda4d3569c8d3324e666000e018d05adf9a8ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
if (comp < (var_4 - (var_5 - +1.8332E-43f + +1.6660E35f))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
var_6[i] = +1.7925E36f;
comp = var_6[i] * var_7 - powf(var_8 - var_9 / +1.3474E-36f / (var_10 * var_11), (+1.9266E34f / -1.7122E25f - +0.0f - (var_12 + var_13)));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
hipDeviceSynchronize();
return 0;
}
| dfeda4d3569c8d3324e666000e018d05adf9a8ac.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
if (comp < (var_4 - (var_5 - +1.8332E-43f + +1.6660E35f))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
var_6[i] = +1.7925E36f;
comp = var_6[i] * var_7 - powf(var_8 - var_9 / +1.3474E-36f / (var_10 * var_11), (+1.9266E34f / -1.7122E25f - +0.0f - (var_12 + var_13)));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
cudaDeviceSynchronize();
return 0;
}
|
6e59db49d79599e6863769f4707a9f5f7a60ddda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *output, int length, int *n1, int *n2) {
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * length;
output[blockOffset + threadID] += n1[blockID] + n2[blockID];
} | 6e59db49d79599e6863769f4707a9f5f7a60ddda.cu | #include "includes.h"
__global__ void add(int *output, int length, int *n1, int *n2) {
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * length;
output[blockOffset + threadID] += n1[blockID] + n2[blockID];
} |
f1e4eb77d98107817afb4a8c069a645a211a3287.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/preconditioner/jacobi_kernels.hpp"
#include <ginkgo/config.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include "core/base/extended_float.hpp"
#include "core/preconditioner/jacobi_utils.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/diagonal_block_manipulation.cuh"
#include "cuda/components/thread_ids.cuh"
#include "cuda/components/uninitialized_array.hpp"
#include "cuda/components/warp_blas.cuh"
#include "cuda/components/zero_array.hpp"
#include "cuda/preconditioner/jacobi_common.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Jacobi preconditioner namespace.
* @ref Jacobi
* @ingroup jacobi
*/
namespace jacobi {
namespace kernel {
template <int max_block_size, typename ReducedType, typename Group,
typename ValueType, typename IndexType>
__device__ __forceinline__ bool validate_precision_reduction_feasibility(
Group &__restrict__ group, IndexType block_size,
ValueType *__restrict__ row, ValueType *__restrict__ work, size_type stride)
{
using gko::detail::float_traits;
// save original data and reduce precision
if (group.thread_rank() < block_size) {
#pragma unroll
for (auto i = 0u; i < max_block_size; ++i) {
if (i >= block_size) {
break;
}
work[i * stride + group.thread_rank()] = row[i];
row[i] = static_cast<ValueType>(static_cast<ReducedType>(row[i]));
}
}
// compute the condition number
auto perm = group.thread_rank();
auto trans_perm = perm;
auto block_cond = compute_infinity_norm<max_block_size>(group, block_size,
block_size, row);
auto succeeded =
invert_block<max_block_size>(group, block_size, row, perm, trans_perm);
block_cond *= compute_infinity_norm<max_block_size>(group, block_size,
block_size, row);
// restore original data
if (group.thread_rank() < block_size) {
#pragma unroll
for (auto i = 0u; i < max_block_size; ++i) {
if (i >= block_size) {
break;
}
row[i] = work[i * stride + group.thread_rank()];
}
}
return succeeded && block_cond >= 1.0 &&
block_cond * float_traits<remove_complex<ValueType>>::eps < 1e-3;
}
// Generates the (non-adaptive) block-Jacobi preconditioner: one subwarp per
// diagonal block extracts the block, inverts it via Gauss-Jordan and writes
// the transposed inverse into `block_data`.
// Launch layout: blockDim = (subwarp_size, warp_size / subwarp_size,
// warps_per_block); each subwarp handles one diagonal block.
template <int max_block_size, int subwarp_size, int warps_per_block,
          typename ValueType, typename IndexType>
__global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size)
    generate(size_type num_rows, const IndexType *__restrict__ row_ptrs,
             const IndexType *__restrict__ col_idxs,
             const ValueType *__restrict__ values,
             ValueType *__restrict__ block_data,
             preconditioner::block_interleaved_storage_scheme<IndexType>
                 storage_scheme,
             const IndexType *__restrict__ block_ptrs, size_type num_blocks)
{
    const auto block_id =
        thread::get_subwarp_id<subwarp_size, warps_per_block>();
    const auto block = group::this_thread_block();
    // each thread holds one column of its subwarp's block in registers
    ValueType row[max_block_size];
    __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block>
        workspace;
    // note: the extraction is done by the whole thread block, so it must run
    // unconditionally (before any `block_id < num_blocks` guard)
    csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>(
        block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs,
        values, block_ptrs, num_blocks, row, 1,
        workspace + threadIdx.z * max_block_size);
    const auto subwarp = group::tiled_partition<subwarp_size>(block);
    if (block_id < num_blocks) {
        const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id];
        // perm / trans_perm record the pivoting applied during inversion
        auto perm = subwarp.thread_rank();
        auto trans_perm = subwarp.thread_rank();
        invert_block<max_block_size>(subwarp, block_size, row, perm,
                                     trans_perm);
        copy_matrix<max_block_size, and_transpose>(
            subwarp, block_size, row, 1, perm, trans_perm,
            block_data + storage_scheme.get_global_block_offset(block_id),
            storage_scheme.get_stride());
    }
}
// Generates the adaptive-precision block-Jacobi preconditioner: in addition to
// inverting each diagonal block, it estimates the block's condition number,
// picks a (possibly reduced) storage precision per block and stores the
// inverse in that precision.
// Launch layout matches generate(): one subwarp of `subwarp_size` threads per
// diagonal block.
template <int max_block_size, int subwarp_size, int warps_per_block,
          typename ValueType, typename IndexType>
__global__ void
__launch_bounds__(warps_per_block *cuda_config::warp_size) adaptive_generate(
    size_type num_rows, const IndexType *__restrict__ row_ptrs,
    const IndexType *__restrict__ col_idxs,
    const ValueType *__restrict__ values, remove_complex<ValueType> accuracy,
    ValueType *__restrict__ block_data,
    preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme,
    remove_complex<ValueType> *__restrict__ conditioning,
    precision_reduction *__restrict__ block_precisions,
    const IndexType *__restrict__ block_ptrs, size_type num_blocks)
{
    // extract blocks
    const auto block_id =
        thread::get_subwarp_id<subwarp_size, warps_per_block>();
    const auto block = group::this_thread_block();
    ValueType row[max_block_size];
    __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block>
        workspace;
    csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>(
        block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs,
        values, block_ptrs, num_blocks, row, 1,
        workspace + threadIdx.z * max_block_size);
    // compute inverse and figure out the correct precision
    const auto subwarp = group::tiled_partition<subwarp_size>(block);
    const auto block_size =
        block_id < num_blocks ? block_ptrs[block_id + 1] - block_ptrs[block_id]
                              : 0;
    auto perm = subwarp.thread_rank();
    auto trans_perm = subwarp.thread_rank();
    // all bits set == "any reduction is fine"; out-of-range subwarps keep this
    // value so they do not constrain the warp-wide precision vote below
    auto prec_descriptor = ~uint32{};
    if (block_id < num_blocks) {
        // cond(B) ~ ||B||_inf * ||B^-1||_inf
        auto block_cond = compute_infinity_norm<max_block_size>(
            subwarp, block_size, block_size, row);
        invert_block<max_block_size>(subwarp, block_size, row, perm,
                                     trans_perm);
        block_cond *= compute_infinity_norm<max_block_size>(subwarp, block_size,
                                                            block_size, row);
        conditioning[block_id] = block_cond;
        const auto prec = block_precisions[block_id];
        prec_descriptor =
            preconditioner::detail::precision_reduction_descriptor::singleton(
                prec);
        if (prec == precision_reduction::autodetect()) {
            using preconditioner::detail::get_supported_storage_reductions;
            // the two lambdas check whether one / two levels of precision
            // reduction keep the block invertible and well conditioned;
            // `block_data` is used as scratch space for the check
            prec_descriptor = get_supported_storage_reductions<ValueType>(
                accuracy, block_cond,
                [&subwarp, &block_size, &row, &block_data, &storage_scheme,
                 &block_id] {
                    using target = reduce_precision<ValueType>;
                    return validate_precision_reduction_feasibility<
                        max_block_size, target>(
                        subwarp, block_size, row,
                        block_data +
                            storage_scheme.get_global_block_offset(block_id),
                        storage_scheme.get_stride());
                },
                [&subwarp, &block_size, &row, &block_data, &storage_scheme,
                 &block_id] {
                    using target =
                        reduce_precision<reduce_precision<ValueType>>;
                    return validate_precision_reduction_feasibility<
                        max_block_size, target>(
                        subwarp, block_size, row,
                        block_data +
                            storage_scheme.get_global_block_offset(block_id),
                        storage_scheme.get_stride());
                });
        }
    }
    // make sure all blocks in the group have the same precision
    const auto warp = group::tiled_partition<cuda_config::warp_size>(block);
    const auto prec =
        preconditioner::detail::get_optimal_storage_reduction(reduce(
            warp, prec_descriptor, [](uint32 x, uint32 y) { return x & y; }));
    // store the block back into memory
    if (block_id < num_blocks) {
        block_precisions[block_id] = prec;
        // dispatches on the runtime precision and stores the inverse using the
        // corresponding `resolved_precision` storage type
        GKO_PRECONDITIONER_JACOBI_RESOLVE_PRECISION(
            ValueType, prec,
            copy_matrix<max_block_size, and_transpose>(
                subwarp, block_size, row, 1, perm, trans_perm,
                reinterpret_cast<resolved_precision *>(
                    block_data + storage_scheme.get_group_offset(block_id)) +
                    storage_scheme.get_block_offset(block_id),
                storage_scheme.get_stride()));
    }
}
} // namespace kernel
namespace {
// Host-side launcher: picks the launch configuration for a compile-time
// `max_block_size` and dispatches to the adaptive or non-adaptive kernel,
// depending on whether per-block precisions are requested.
template <int warps_per_block, int max_block_size, typename ValueType,
          typename IndexType>
void generate(syn::value_list<int, max_block_size>,
              const matrix::Csr<ValueType, IndexType> *mtx,
              remove_complex<ValueType> accuracy, ValueType *block_data,
              const preconditioner::block_interleaved_storage_scheme<IndexType>
                  &storage_scheme,
              remove_complex<ValueType> *conditioning,
              precision_reduction *block_precisions,
              const IndexType *block_ptrs, size_type num_blocks)
{
    // one subwarp (the next power of two >= max_block_size) per Jacobi block
    constexpr int subwarp_size = get_larger_power(max_block_size);
    constexpr int blocks_per_warp = cuda_config::warp_size / subwarp_size;
    const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp),
                         1, 1);
    const dim3 block_size(subwarp_size, blocks_per_warp, warps_per_block);
    // non-null `block_precisions` selects the adaptive-precision variant
    if (block_precisions) {
        hipLaunchKernelGGL(( kernel::adaptive_generate<max_block_size, subwarp_size, warps_per_block>)
            , dim3(grid_size), dim3(block_size), 0, 0,
                mtx->get_size()[0], mtx->get_const_row_ptrs(),
                mtx->get_const_col_idxs(),
                as_cuda_type(mtx->get_const_values()), as_cuda_type(accuracy),
                as_cuda_type(block_data), storage_scheme,
                as_cuda_type(conditioning), block_precisions, block_ptrs,
                num_blocks);
    } else {
        hipLaunchKernelGGL(( kernel::generate<max_block_size, subwarp_size, warps_per_block>)
            , dim3(grid_size), dim3(block_size), 0, 0,
                mtx->get_size()[0], mtx->get_const_row_ptrs(),
                mtx->get_const_col_idxs(),
                as_cuda_type(mtx->get_const_values()), as_cuda_type(block_data),
                storage_scheme, block_ptrs, num_blocks);
    }
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_generate, generate);
} // namespace
// Public kernel entry point: zeroes the output storage and selects the
// precompiled kernel instantiation with the smallest compiled block size that
// can hold `max_block_size`.
template <typename ValueType, typename IndexType>
void generate(std::shared_ptr<const CudaExecutor> exec,
              const matrix::Csr<ValueType, IndexType> *system_matrix,
              size_type num_blocks, uint32 max_block_size,
              remove_complex<ValueType> accuracy,
              const preconditioner::block_interleaved_storage_scheme<IndexType>
                  &storage_scheme,
              Array<remove_complex<ValueType>> &conditioning,
              Array<precision_reduction> &block_precisions,
              const Array<IndexType> &block_pointers, Array<ValueType> &blocks)
{
    // padding between blocks must be zero for correct reduced-precision reads
    zero_array(blocks.get_num_elems(), blocks.get_data());
    select_generate(compiled_kernels(),
                    [&](int compiled_block_size) {
                        return max_block_size <= compiled_block_size;
                    },
                    syn::value_list<int, cuda_config::min_warps_per_block>(),
                    syn::type_list<>(), system_matrix, accuracy,
                    blocks.get_data(), storage_scheme, conditioning.get_data(),
                    block_precisions.get_data(),
                    block_pointers.get_const_data(), num_blocks);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_JACOBI_GENERATE_KERNEL);
} // namespace jacobi
} // namespace cuda
} // namespace kernels
} // namespace gko
| f1e4eb77d98107817afb4a8c069a645a211a3287.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/preconditioner/jacobi_kernels.hpp"
#include <ginkgo/config.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include "core/base/extended_float.hpp"
#include "core/preconditioner/jacobi_utils.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/diagonal_block_manipulation.cuh"
#include "cuda/components/thread_ids.cuh"
#include "cuda/components/uninitialized_array.hpp"
#include "cuda/components/warp_blas.cuh"
#include "cuda/components/zero_array.hpp"
#include "cuda/preconditioner/jacobi_common.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Jacobi preconditioner namespace.
* @ref Jacobi
* @ingroup jacobi
*/
namespace jacobi {
namespace kernel {
// Checks whether storing the (already inverted) block in `ReducedType` keeps
// it usable: rounds `row` to ReducedType, re-inverts it, and accepts the
// reduction only if inversion succeeded and the condition number is sane.
// `work` (size >= block_size * stride) is scratch used to save/restore the
// original values; `row` is left unchanged on exit.
template <int max_block_size, typename ReducedType, typename Group,
          typename ValueType, typename IndexType>
__device__ __forceinline__ bool validate_precision_reduction_feasibility(
    Group &__restrict__ group, IndexType block_size,
    ValueType *__restrict__ row, ValueType *__restrict__ work, size_type stride)
{
    using gko::detail::float_traits;
    // save original data and reduce precision
    if (group.thread_rank() < block_size) {
#pragma unroll
        for (auto i = 0u; i < max_block_size; ++i) {
            if (i >= block_size) {
                break;
            }
            work[i * stride + group.thread_rank()] = row[i];
            // round-trip through ReducedType to emulate the storage rounding
            row[i] = static_cast<ValueType>(static_cast<ReducedType>(row[i]));
        }
    }
    // compute the condition number
    auto perm = group.thread_rank();
    auto trans_perm = perm;
    auto block_cond = compute_infinity_norm<max_block_size>(group, block_size,
                                                            block_size, row);
    auto succeeded =
        invert_block<max_block_size>(group, block_size, row, perm, trans_perm);
    block_cond *= compute_infinity_norm<max_block_size>(group, block_size,
                                                        block_size, row);
    // restore original data
    if (group.thread_rank() < block_size) {
#pragma unroll
        for (auto i = 0u; i < max_block_size; ++i) {
            if (i >= block_size) {
                break;
            }
            row[i] = work[i * stride + group.thread_rank()];
        }
    }
    // feasible iff inversion worked and cond * eps stays well below the
    // 1e-3 safety threshold (cond >= 1 filters out degenerate estimates)
    return succeeded && block_cond >= 1.0 &&
           block_cond * float_traits<remove_complex<ValueType>>::eps < 1e-3;
}
// Generates the (non-adaptive) block-Jacobi preconditioner: one subwarp per
// diagonal block extracts the block, inverts it via Gauss-Jordan and writes
// the transposed inverse into `block_data`.
// Launch layout: blockDim = (subwarp_size, warp_size / subwarp_size,
// warps_per_block); each subwarp handles one diagonal block.
template <int max_block_size, int subwarp_size, int warps_per_block,
          typename ValueType, typename IndexType>
__global__ void __launch_bounds__(warps_per_block *cuda_config::warp_size)
    generate(size_type num_rows, const IndexType *__restrict__ row_ptrs,
             const IndexType *__restrict__ col_idxs,
             const ValueType *__restrict__ values,
             ValueType *__restrict__ block_data,
             preconditioner::block_interleaved_storage_scheme<IndexType>
                 storage_scheme,
             const IndexType *__restrict__ block_ptrs, size_type num_blocks)
{
    const auto block_id =
        thread::get_subwarp_id<subwarp_size, warps_per_block>();
    const auto block = group::this_thread_block();
    // each thread holds one column of its subwarp's block in registers
    ValueType row[max_block_size];
    __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block>
        workspace;
    // cooperative extraction must run unconditionally (whole thread block)
    csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>(
        block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs,
        values, block_ptrs, num_blocks, row, 1,
        workspace + threadIdx.z * max_block_size);
    const auto subwarp = group::tiled_partition<subwarp_size>(block);
    if (block_id < num_blocks) {
        const auto block_size = block_ptrs[block_id + 1] - block_ptrs[block_id];
        // perm / trans_perm record the pivoting applied during inversion
        auto perm = subwarp.thread_rank();
        auto trans_perm = subwarp.thread_rank();
        invert_block<max_block_size>(subwarp, block_size, row, perm,
                                     trans_perm);
        copy_matrix<max_block_size, and_transpose>(
            subwarp, block_size, row, 1, perm, trans_perm,
            block_data + storage_scheme.get_global_block_offset(block_id),
            storage_scheme.get_stride());
    }
}
// Generates the adaptive-precision block-Jacobi preconditioner: inverts each
// diagonal block, estimates its condition number, picks a (possibly reduced)
// storage precision per block and stores the inverse in that precision.
// Launch layout matches generate(): one subwarp per diagonal block.
template <int max_block_size, int subwarp_size, int warps_per_block,
          typename ValueType, typename IndexType>
__global__ void
__launch_bounds__(warps_per_block *cuda_config::warp_size) adaptive_generate(
    size_type num_rows, const IndexType *__restrict__ row_ptrs,
    const IndexType *__restrict__ col_idxs,
    const ValueType *__restrict__ values, remove_complex<ValueType> accuracy,
    ValueType *__restrict__ block_data,
    preconditioner::block_interleaved_storage_scheme<IndexType> storage_scheme,
    remove_complex<ValueType> *__restrict__ conditioning,
    precision_reduction *__restrict__ block_precisions,
    const IndexType *__restrict__ block_ptrs, size_type num_blocks)
{
    // extract blocks
    const auto block_id =
        thread::get_subwarp_id<subwarp_size, warps_per_block>();
    const auto block = group::this_thread_block();
    ValueType row[max_block_size];
    __shared__ UninitializedArray<ValueType, max_block_size * warps_per_block>
        workspace;
    csr::extract_transposed_diag_blocks<max_block_size, warps_per_block>(
        block, cuda_config::warp_size / subwarp_size, row_ptrs, col_idxs,
        values, block_ptrs, num_blocks, row, 1,
        workspace + threadIdx.z * max_block_size);
    // compute inverse and figure out the correct precision
    const auto subwarp = group::tiled_partition<subwarp_size>(block);
    const auto block_size =
        block_id < num_blocks ? block_ptrs[block_id + 1] - block_ptrs[block_id]
                              : 0;
    auto perm = subwarp.thread_rank();
    auto trans_perm = subwarp.thread_rank();
    // all bits set == "any reduction is fine"; out-of-range subwarps keep this
    // value so they do not constrain the warp-wide precision vote below
    auto prec_descriptor = ~uint32{};
    if (block_id < num_blocks) {
        // cond(B) ~ ||B||_inf * ||B^-1||_inf
        auto block_cond = compute_infinity_norm<max_block_size>(
            subwarp, block_size, block_size, row);
        invert_block<max_block_size>(subwarp, block_size, row, perm,
                                     trans_perm);
        block_cond *= compute_infinity_norm<max_block_size>(subwarp, block_size,
                                                            block_size, row);
        conditioning[block_id] = block_cond;
        const auto prec = block_precisions[block_id];
        prec_descriptor =
            preconditioner::detail::precision_reduction_descriptor::singleton(
                prec);
        if (prec == precision_reduction::autodetect()) {
            using preconditioner::detail::get_supported_storage_reductions;
            // the two lambdas check whether one / two levels of precision
            // reduction keep the block invertible and well conditioned;
            // `block_data` is used as scratch space for the check
            prec_descriptor = get_supported_storage_reductions<ValueType>(
                accuracy, block_cond,
                [&subwarp, &block_size, &row, &block_data, &storage_scheme,
                 &block_id] {
                    using target = reduce_precision<ValueType>;
                    return validate_precision_reduction_feasibility<
                        max_block_size, target>(
                        subwarp, block_size, row,
                        block_data +
                            storage_scheme.get_global_block_offset(block_id),
                        storage_scheme.get_stride());
                },
                [&subwarp, &block_size, &row, &block_data, &storage_scheme,
                 &block_id] {
                    using target =
                        reduce_precision<reduce_precision<ValueType>>;
                    return validate_precision_reduction_feasibility<
                        max_block_size, target>(
                        subwarp, block_size, row,
                        block_data +
                            storage_scheme.get_global_block_offset(block_id),
                        storage_scheme.get_stride());
                });
        }
    }
    // make sure all blocks in the group have the same precision
    const auto warp = group::tiled_partition<cuda_config::warp_size>(block);
    const auto prec =
        preconditioner::detail::get_optimal_storage_reduction(reduce(
            warp, prec_descriptor, [](uint32 x, uint32 y) { return x & y; }));
    // store the block back into memory
    if (block_id < num_blocks) {
        block_precisions[block_id] = prec;
        // dispatches on the runtime precision and stores the inverse using the
        // corresponding `resolved_precision` storage type
        GKO_PRECONDITIONER_JACOBI_RESOLVE_PRECISION(
            ValueType, prec,
            copy_matrix<max_block_size, and_transpose>(
                subwarp, block_size, row, 1, perm, trans_perm,
                reinterpret_cast<resolved_precision *>(
                    block_data + storage_scheme.get_group_offset(block_id)) +
                    storage_scheme.get_block_offset(block_id),
                storage_scheme.get_stride()));
    }
}
} // namespace kernel
namespace {
// Host-side launcher: picks the launch configuration for a compile-time
// `max_block_size` and dispatches to the adaptive or non-adaptive kernel,
// depending on whether per-block precisions are requested.
template <int warps_per_block, int max_block_size, typename ValueType,
          typename IndexType>
void generate(syn::value_list<int, max_block_size>,
              const matrix::Csr<ValueType, IndexType> *mtx,
              remove_complex<ValueType> accuracy, ValueType *block_data,
              const preconditioner::block_interleaved_storage_scheme<IndexType>
                  &storage_scheme,
              remove_complex<ValueType> *conditioning,
              precision_reduction *block_precisions,
              const IndexType *block_ptrs, size_type num_blocks)
{
    // one subwarp (the next power of two >= max_block_size) per Jacobi block
    constexpr int subwarp_size = get_larger_power(max_block_size);
    constexpr int blocks_per_warp = cuda_config::warp_size / subwarp_size;
    const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp),
                         1, 1);
    const dim3 block_size(subwarp_size, blocks_per_warp, warps_per_block);
    // non-null `block_precisions` selects the adaptive-precision variant
    if (block_precisions) {
        kernel::adaptive_generate<max_block_size, subwarp_size, warps_per_block>
            <<<grid_size, block_size, 0, 0>>>(
                mtx->get_size()[0], mtx->get_const_row_ptrs(),
                mtx->get_const_col_idxs(),
                as_cuda_type(mtx->get_const_values()), as_cuda_type(accuracy),
                as_cuda_type(block_data), storage_scheme,
                as_cuda_type(conditioning), block_precisions, block_ptrs,
                num_blocks);
    } else {
        kernel::generate<max_block_size, subwarp_size, warps_per_block>
            <<<grid_size, block_size, 0, 0>>>(
                mtx->get_size()[0], mtx->get_const_row_ptrs(),
                mtx->get_const_col_idxs(),
                as_cuda_type(mtx->get_const_values()), as_cuda_type(block_data),
                storage_scheme, block_ptrs, num_blocks);
    }
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_generate, generate);
} // namespace
// Public kernel entry point: zeroes the output storage and selects the
// precompiled kernel instantiation with the smallest compiled block size that
// can hold `max_block_size`.
template <typename ValueType, typename IndexType>
void generate(std::shared_ptr<const CudaExecutor> exec,
              const matrix::Csr<ValueType, IndexType> *system_matrix,
              size_type num_blocks, uint32 max_block_size,
              remove_complex<ValueType> accuracy,
              const preconditioner::block_interleaved_storage_scheme<IndexType>
                  &storage_scheme,
              Array<remove_complex<ValueType>> &conditioning,
              Array<precision_reduction> &block_precisions,
              const Array<IndexType> &block_pointers, Array<ValueType> &blocks)
{
    // padding between blocks must be zero for correct reduced-precision reads
    zero_array(blocks.get_num_elems(), blocks.get_data());
    select_generate(compiled_kernels(),
                    [&](int compiled_block_size) {
                        return max_block_size <= compiled_block_size;
                    },
                    syn::value_list<int, cuda_config::min_warps_per_block>(),
                    syn::type_list<>(), system_matrix, accuracy,
                    blocks.get_data(), storage_scheme, conditioning.get_data(),
                    block_precisions.get_data(),
                    block_pointers.get_const_data(), num_blocks);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_JACOBI_GENERATE_KERNEL);
} // namespace jacobi
} // namespace cuda
} // namespace kernels
} // namespace gko
|
6b7145d9ada815d086b1da8b03b59d1fe719ff62.hip | // !!! This is a file automatically generated by hipify!!!
// Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <hip/hip_runtime.h>
// #include <rocblas.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "detail/ctc_helper.h"
#include "ctc.h"
const int warp_size = 32;
// Block-wide (CTA) reduction: folds NT per-thread partials down to a single
// value using shared memory until one warp remains, then warp shuffles.
// Only thread 0's return value is meaningful; `count` guards partially filled
// blocks so uninitialized lanes never contribute.
template<int NT, typename T, typename Rop>
struct CTAReduce;
template<int NT, typename T, typename Rop>
struct CTAReduce {
    enum { Size = NT, Capacity = NT };
    struct Storage { T shared[Capacity]; };
    __device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
        T* s = storage.shared;
        s[tid] = x;
        __syncthreads();
        // Fold the data in half with each pass.
#pragma unroll
        for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
            if(tid + offset < count && tid < offset) {
                // Read from the right half and store to the left half.
                x = g(x, s[offset + tid]);
                s[tid] = x;
            }
            // barrier is outside the divergent branch: all threads reach it
            __syncthreads();
        }
        // final warp: finish with register shuffles (no shared memory needed)
        T shuff;
        for (int offset = warp_size / 2; offset > 0; offset /= 2) {
#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
            shuff = __shfl_down_sync(0xFFFFFFFF, x, offset);
#else
            shuff = __shfl_down(x, offset);
#endif
            if (tid + offset < count && tid < offset)
                x = g(x, shuff);
        }
        return x;
    }
};
// Reduces each column of a column-major (num_rows x num_cols) matrix to a
// single value: output[col] = g-fold over f(input[:, col]).
// One block of NT threads per column; each thread strides over rows.
// NOTE(review): `curr` is only initialized when tid < num_rows — assumes
// num_rows > 0 and NT <= num_rows per active lane semantics of CTAReduce.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    typedef CTAReduce<NT, T, Rop> R;
    __shared__ typename R::Storage storage;
    int tid = threadIdx.x;
    int idx = tid;
    int col = blockIdx.x;
    T curr;
    // Each block works on a column
    if (idx < num_rows)
        curr = f(input[idx + col*num_rows]);
    idx += NT;
    while (idx < num_rows) {
        curr = g(curr, f(input[idx + col*num_rows]));
        idx += NT;
    }
    // Sum thread-totals over the CTA.
    curr = R::reduce(tid, curr, storage, num_rows, g);
    // Store result in out
    if (tid == 0)
        output[col] = curr;
}
// Reduces each row of a column-major (num_rows x num_cols) matrix to a single
// value: output[row] = g-fold over f(input[row, :]).
// Launch layout: blockDim = (warp_size, NT / warp_size); threadIdx.x selects
// the row, threadIdx.y strides over columns; warps_per_block partials per row
// are then combined by the threadIdx.y == 0 lane.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    __shared__ T s[NT];
    int warps_per_block = NT / warp_size;
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = threadIdx.y;
    T curr;
    // curr stays uninitialized for out-of-range lanes; their shared-memory
    // slot is written but never read back (the output store is guarded)
    if (row < num_rows && col < num_cols) {
        curr = f(input[row + col*num_rows]);
        col += blockDim.y;
        while (col < num_cols) {
            curr = g(curr, f(input[row + col*num_rows]));
            col += blockDim.y;
        }
    }
    s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
    __syncthreads();
    // Reduce
    if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
        for (int i = 1; i < warps_per_block && i < num_cols; ++i)
            curr = g(curr, s[i + threadIdx.x * warps_per_block]);
        output[row] = curr;
    }
}
// Dispatches a reduction over rows (axis == true: one block per column) or
// over columns (axis == false: one warp-lane per row) on the given stream.
struct ReduceHelper {
    template<typename T, typename Iof, typename Rof>
    static void impl(Iof f, Rof g, const T* input, T* output, int num_rows, int num_cols, bool axis, hipStream_t stream) {
        int grid_size;
        if (axis) {
            grid_size = num_cols;
            hipLaunchKernelGGL(( reduce_rows<128>), dim3(grid_size), dim3(128), 0, stream,
                f, g, input, output, num_rows, num_cols);
        } else {
            dim3 tpb(warp_size, 128 / warp_size);
            // NOTE(review): grid is sized from num_cols, but reduce_cols
            // indexes rows via blockDim.x * blockIdx.x + threadIdx.x, so this
            // only covers all rows when num_cols >= num_rows — confirm the
            // callers guarantee this (matches upstream warp-ctc).
            grid_size = (num_cols + warp_size - 1)/warp_size;
            hipLaunchKernelGGL(( reduce_cols<128>), dim3(grid_size), dim3(tpb), 0, stream,
               f, g, input, output, num_rows, num_cols);
        }
    }
};
// Runs the row/column reduction on `stream` and blocks until it completes.
// Returns CTC_STATUS_EXECUTION_FAILED if the launch or execution raised a
// runtime error, CTC_STATUS_SUCCESS otherwise.
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output, int rows, int cols, bool axis, hipStream_t stream) {
    ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
    hipStreamSynchronize(stream);
    return (hipGetLastError() == hipSuccess) ? CTC_STATUS_SUCCESS
                                             : CTC_STATUS_EXECUTION_FAILED;
}
// Sum of negated elements along the requested axis; thin wrapper over the
// generic reduce() driver.
template<typename T>
ctcStatus_t reduce_negate(const T *input, T *output, int rows, int cols, bool axis, hipStream_t stream) {
    ctc_helper::negate<T> map_op;
    ctc_helper::add<T> fold_op;
    return reduce(map_op, fold_op, input, output, rows, cols, axis, stream);
}
// Explicit instantiations for the value types used by warp-ctc.
template ctcStatus_t reduce_negate<float>(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream);
template ctcStatus_t reduce_negate<double>(const double *input, double *output, int rows, int cols, bool axis, hipStream_t stream);
// Sum of exp(x) along the requested axis (used for log-sum-exp); thin wrapper
// over the generic reduce() driver.
template<typename T>
ctcStatus_t reduce_exp(const T *input, T *output, int rows, int cols, bool axis, hipStream_t stream) {
    ctc_helper::exponential<T> map_op;
    ctc_helper::add<T> fold_op;
    return reduce(map_op, fold_op, input, output, rows, cols, axis, stream);
}
// Explicit instantiations for the value types used by warp-ctc.
template ctcStatus_t reduce_exp<float>(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream);
template ctcStatus_t reduce_exp<double>(const double *input, double *output, int rows, int cols, bool axis, hipStream_t stream);
// Maximum along the requested axis (identity map, max fold); thin wrapper
// over the generic reduce() driver.
template<typename T>
ctcStatus_t reduce_max(const T *input, T *output, int rows, int cols, bool axis, hipStream_t stream) {
    ctc_helper::identity<T> map_op;
    ctc_helper::maximum<T> fold_op;
    return reduce(map_op, fold_op, input, output, rows, cols, axis, stream);
}
// Explicit instantiations for the value types used by warp-ctc.
template ctcStatus_t reduce_max<float>(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream);
template ctcStatus_t reduce_max<double>(const double *input, double *output, int rows, int cols, bool axis, hipStream_t stream);
| 6b7145d9ada815d086b1da8b03b59d1fe719ff62.cu | // Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <cuda_runtime.h>
// #include <cublas_v2.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "detail/ctc_helper.h"
#include "ctc.h"
const int warp_size = 32;
// Block-wide (CTA) reduction: folds NT per-thread partials down to a single
// value using shared memory until one warp remains, then warp shuffles.
// Only thread 0's return value is meaningful; `count` guards partially filled
// blocks so uninitialized lanes never contribute.
template<int NT, typename T, typename Rop>
struct CTAReduce;
template<int NT, typename T, typename Rop>
struct CTAReduce {
    enum { Size = NT, Capacity = NT };
    struct Storage { T shared[Capacity]; };
    __device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
        T* s = storage.shared;
        s[tid] = x;
        __syncthreads();
        // Fold the data in half with each pass.
#pragma unroll
        for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
            if(tid + offset < count && tid < offset) {
                // Read from the right half and store to the left half.
                x = g(x, s[offset + tid]);
                s[tid] = x;
            }
            // barrier is outside the divergent branch: all threads reach it
            __syncthreads();
        }
        // final warp: finish with register shuffles (no shared memory needed)
        T shuff;
        for (int offset = warp_size / 2; offset > 0; offset /= 2) {
#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9)
            shuff = __shfl_down_sync(0xFFFFFFFF, x, offset);
#else
            shuff = __shfl_down(x, offset);
#endif
            if (tid + offset < count && tid < offset)
                x = g(x, shuff);
        }
        return x;
    }
};
// Reduces each column of a column-major (num_rows x num_cols) matrix to a
// single value: output[col] = g-fold over f(input[:, col]).
// One block of NT threads per column; each thread strides over rows.
// NOTE(review): `curr` is only initialized when tid < num_rows — assumes
// num_rows > 0 and NT <= num_rows per active lane semantics of CTAReduce.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    typedef CTAReduce<NT, T, Rop> R;
    __shared__ typename R::Storage storage;
    int tid = threadIdx.x;
    int idx = tid;
    int col = blockIdx.x;
    T curr;
    // Each block works on a column
    if (idx < num_rows)
        curr = f(input[idx + col*num_rows]);
    idx += NT;
    while (idx < num_rows) {
        curr = g(curr, f(input[idx + col*num_rows]));
        idx += NT;
    }
    // Sum thread-totals over the CTA.
    curr = R::reduce(tid, curr, storage, num_rows, g);
    // Store result in out
    if (tid == 0)
        output[col] = curr;
}
// Reduces each row of a column-major (num_rows x num_cols) matrix to a single
// value: output[row] = g-fold over f(input[row, :]).
// Launch layout: blockDim = (warp_size, NT / warp_size); threadIdx.x selects
// the row, threadIdx.y strides over columns; warps_per_block partials per row
// are then combined by the threadIdx.y == 0 lane.
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
                            int num_rows, int num_cols) {
    __shared__ T s[NT];
    int warps_per_block = NT / warp_size;
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = threadIdx.y;
    T curr;
    // curr stays uninitialized for out-of-range lanes; their shared-memory
    // slot is written but never read back (the output store is guarded)
    if (row < num_rows && col < num_cols) {
        curr = f(input[row + col*num_rows]);
        col += blockDim.y;
        while (col < num_cols) {
            curr = g(curr, f(input[row + col*num_rows]));
            col += blockDim.y;
        }
    }
    s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
    __syncthreads();
    // Reduce
    if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
        for (int i = 1; i < warps_per_block && i < num_cols; ++i)
            curr = g(curr, s[i + threadIdx.x * warps_per_block]);
        output[row] = curr;
    }
}
// Dispatches a reduction over rows (axis == true: one block per column) or
// over columns (axis == false: one warp-lane per row) on the given stream.
struct ReduceHelper {
    template<typename T, typename Iof, typename Rof>
    static void impl(Iof f, Rof g, const T* input, T* output, int num_rows, int num_cols, bool axis, cudaStream_t stream) {
        int grid_size;
        if (axis) {
            grid_size = num_cols;
            reduce_rows<128><<<grid_size, 128, 0, stream>>>
               (f, g, input, output, num_rows, num_cols);
        } else {
            dim3 tpb(warp_size, 128 / warp_size);
            // NOTE(review): grid is sized from num_cols, but reduce_cols
            // indexes rows via blockDim.x * blockIdx.x + threadIdx.x, so this
            // only covers all rows when num_cols >= num_rows — confirm the
            // callers guarantee this (matches upstream warp-ctc).
            grid_size = (num_cols + warp_size - 1)/warp_size;
            reduce_cols<128><<<grid_size, tpb, 0, stream>>>
                (f, g, input, output, num_rows, num_cols);
        }
    }
};
// Runs the row/column reduction on `stream` and blocks until it completes.
// Returns CTC_STATUS_EXECUTION_FAILED if the launch or execution raised a
// runtime error, CTC_STATUS_SUCCESS otherwise.
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output, int rows, int cols, bool axis, cudaStream_t stream) {
    ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
    cudaStreamSynchronize(stream);
    return (cudaGetLastError() == cudaSuccess) ? CTC_STATUS_SUCCESS
                                               : CTC_STATUS_EXECUTION_FAILED;
}
// Sum of negated elements along the requested axis; thin wrapper over the
// generic reduce() driver.
template<typename T>
ctcStatus_t reduce_negate(const T *input, T *output, int rows, int cols, bool axis, cudaStream_t stream) {
    ctc_helper::negate<T> map_op;
    ctc_helper::add<T> fold_op;
    return reduce(map_op, fold_op, input, output, rows, cols, axis, stream);
}
// Explicit instantiations for the value types used by warp-ctc.
template ctcStatus_t reduce_negate<float>(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream);
template ctcStatus_t reduce_negate<double>(const double *input, double *output, int rows, int cols, bool axis, cudaStream_t stream);
// Sum of exp(x) along the requested axis (used for log-sum-exp); thin wrapper
// over the generic reduce() driver.
template<typename T>
ctcStatus_t reduce_exp(const T *input, T *output, int rows, int cols, bool axis, cudaStream_t stream) {
    ctc_helper::exponential<T> map_op;
    ctc_helper::add<T> fold_op;
    return reduce(map_op, fold_op, input, output, rows, cols, axis, stream);
}
// Explicit instantiations for the value types used by warp-ctc.
template ctcStatus_t reduce_exp<float>(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream);
template ctcStatus_t reduce_exp<double>(const double *input, double *output, int rows, int cols, bool axis, cudaStream_t stream);
// Maximum along the requested axis (identity map, max fold); thin wrapper
// over the generic reduce() driver.
template<typename T>
ctcStatus_t reduce_max(const T *input, T *output, int rows, int cols, bool axis, cudaStream_t stream) {
    ctc_helper::identity<T> map_op;
    ctc_helper::maximum<T> fold_op;
    return reduce(map_op, fold_op, input, output, rows, cols, axis, stream);
}
// Explicit instantiations for the value types used by warp-ctc.
template ctcStatus_t reduce_max<float>(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream);
template ctcStatus_t reduce_max<double>(const double *input, double *output, int rows, int cols, bool axis, cudaStream_t stream);
|
0db177765d1fa7b0a0ad0c220befa71371a7c5ac.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/legacy/groupby.hpp>
#include <cudf/legacy/table.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <tests/utilities/legacy/column_wrapper.cuh>
#include <tests/utilities/legacy/compare_column_wrappers.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <random>
template <typename T> using column_wrapper = cudf::test::column_wrapper<T>;
// GoogleTest fixture for single-column groupby-without-aggregation tests.
// Runs gdf_group_by_without_aggregations on one key column and compares the
// resulting sorted keys and group offsets against expected column wrappers.
template <typename KeyType> struct SingleColumnGroupby : public GdfTest {
  using Key = KeyType;
  // Type-dispatched functor: asserts two gdf_columns hold equal data.
  struct column_equality {
    template <typename T>
    bool operator()(gdf_column lhs, gdf_column rhs) const {
      std::unique_ptr<column_wrapper<T>> lhs_col;
      std::unique_ptr<column_wrapper<T>> rhs_col;
      lhs_col.reset(new column_wrapper<T>(lhs));
      rhs_col.reset(new column_wrapper<T>(rhs));
      expect_columns_are_equal(*lhs_col, *rhs_col);
      return true;
    }
  };
  // Column-by-column equality check of two cudf tables (shape + contents).
  void expect_tables_are_equal(cudf::table const &lhs, cudf::table const &rhs) {
    EXPECT_EQ(lhs.num_columns(), rhs.num_columns());
    EXPECT_EQ(lhs.num_rows(), rhs.num_rows());
    EXPECT_TRUE(
        std::equal(lhs.begin(), lhs.end(), rhs.begin(),
                   [](gdf_column const *lhs_col, gdf_column const *rhs_col) {
                     return cudf::type_dispatcher(
                         lhs_col->dtype, column_equality{}, *lhs_col, *rhs_col);
                   }));
  }
  // Invokes the groupby under test. ignore_null_keys == true emulates pandas
  // semantics (null keys dropped); false emulates SQL (nulls form a group).
  std::pair<cudf::table, gdf_column> gdf_solution(cudf::table const &input_keys,
                                                  bool ignore_null_keys) {
    gdf_context context;
    if (not ignore_null_keys) { // SQL
      context.flag_groupby_include_nulls = true;
      context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
    } else { // PANDAS
      context.flag_groupby_include_nulls = false;
      context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
    }
    std::vector<int> groupby_col_indices;
    for (cudf::size_type i = 0; i < input_keys.num_columns(); i++)
      groupby_col_indices.push_back(i);
    return gdf_group_by_without_aggregations(
        input_keys, groupby_col_indices.size(), groupby_col_indices.data(),
        &context);
  }
  // Frees device memory of every column in a result table.
  inline void destroy_table(cudf::table *t) {
    std::for_each(t->begin(), t->end(), [](gdf_column *col) {
      gdf_column_free(col);
      delete col;
    });
  }
  // Runs the groupby on `keys` and checks the sorted keys and the per-group
  // starting offsets against the expected wrappers.
  void evaluate_test(column_wrapper<KeyType> keys,
                     column_wrapper<KeyType> sorted_keys,
                     column_wrapper<cudf::size_type> column_offsets,
                     bool ignore_null_keys = true) {
    using namespace cudf::test;
    cudf::table input_keys{keys.get()};
    cudf::table actual_keys_table;
    gdf_column column_offsets_output;
    std::tie(actual_keys_table, column_offsets_output) =
        gdf_solution(input_keys, ignore_null_keys);
    // make sure all device work (and any async error) has surfaced
    EXPECT_EQ(hipSuccess, hipDeviceSynchronize());
    cudf::table sorted_expected_keys{sorted_keys.get()};
    cudf::table expected_column_offsets{column_offsets.get()};
    cudf::table actual_column_offsets({&column_offsets_output});
    CUDF_EXPECT_NO_THROW(
        expect_tables_are_equal(actual_keys_table, sorted_expected_keys));
    CUDF_EXPECT_NO_THROW(expect_tables_are_equal(actual_column_offsets,
                                                 expected_column_offsets));
    destroy_table(&actual_keys_table);
    gdf_column_free(&column_offsets_output);
  }
};
using TestingTypes =
::testing::Types<int8_t, int32_t, int64_t, float, double, cudf::category,
cudf::date32, cudf::date64>;
TYPED_TEST_CASE(SingleColumnGroupby, TestingTypes);
// All 10 keys identical, no nulls, pandas semantics: expect a single group
// starting at offset 0 with the keys unchanged.
TYPED_TEST(SingleColumnGroupby, OneGroupNoNullsPandasStyle) {
  constexpr int size{10};
  using T = typename SingleColumnGroupby<TypeParam>::Key;
  T key{42};
  bool ignore_null_keys = true;
  this->evaluate_test(
      column_wrapper<T>(size, [key](auto index) { return key; }),
      column_wrapper<T>(size, [key](auto index) { return key; }),
      column_wrapper<cudf::size_type>({0}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, OneGroupNoNullsSqlStyle) {
  // Ten identical non-null keys under SQL semantics: one group, offset 0.
  constexpr int size{10};
  using T = typename SingleColumnGroupby<TypeParam>::Key;
  T key{42};
  bool ignore_null_keys = false;
  auto constant_key = [key](auto) { return key; };
  this->evaluate_test(column_wrapper<T>(size, constant_key),
                      column_wrapper<T>(size, constant_key),
                      column_wrapper<cudf::size_type>({0}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, OneGroupEvenNullKeysPandasStyle) {
constexpr int size{10};
using T = typename SingleColumnGroupby<TypeParam>::Key;
T key{42};
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>(size, [key](auto index) { return key; },
[](auto index) { return index % 2; }),
column_wrapper<T>({T(key), T(key), T(key), T(key), T(key)},
[&](auto index) { return true; }),
column_wrapper<cudf::size_type>({0}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, OneGroupEvenNullKeysSqlStyle) {
constexpr int size{10};
using T = typename SingleColumnGroupby<TypeParam>::Key;
T key{42};
bool ignore_null_keys = false;
this->evaluate_test(column_wrapper<T>(size, [key](auto index) { return key; },
[](auto index) { return index % 2; }),
column_wrapper<T>({T(key), T(key), T(key), T(key), T(key),
T(0), T(0), T(0), T(0), T(0)},
[&](auto index) { return index < 5; }),
column_wrapper<cudf::size_type>({0, 5}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniquePandasStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<cudf::size_type>({0, 1, 2, 3, 4, 5, 6, 7}),
ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniqueSqlStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = false;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<cudf::size_type>({0, 1, 2, 3, 4, 5, 6, 7}),
ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniqueEvenKeysNullPandasStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)},
[](auto index) { return index % 2; }),
column_wrapper<T>({T(1), T(3), T(5), T(7)},
[](auto index) { return true; }),
column_wrapper<cudf::size_type>({0, 1, 2, 3}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniqueEvenKeysNullSqlStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = false;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)},
[](auto index) { return index % 2; }),
column_wrapper<T>({T(1), T(3), T(5), T(7), T(0), T(0), T(0), T(0)},
[](auto index) { return index < 4; }), /* */
column_wrapper<cudf::size_type>({0, 1, 2, 3, 4}), ignore_null_keys);
}
| 0db177765d1fa7b0a0ad0c220befa71371a7c5ac.cu |
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/legacy/groupby.hpp>
#include <cudf/legacy/table.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <tests/utilities/legacy/column_wrapper.cuh>
#include <tests/utilities/legacy/compare_column_wrappers.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <random>
template <typename T> using column_wrapper = cudf::test::column_wrapper<T>;
template <typename KeyType> struct SingleColumnGroupby : public GdfTest {
using Key = KeyType;
struct column_equality {
template <typename T>
bool operator()(gdf_column lhs, gdf_column rhs) const {
std::unique_ptr<column_wrapper<T>> lhs_col;
std::unique_ptr<column_wrapper<T>> rhs_col;
lhs_col.reset(new column_wrapper<T>(lhs));
rhs_col.reset(new column_wrapper<T>(rhs));
expect_columns_are_equal(*lhs_col, *rhs_col);
return true;
}
};
void expect_tables_are_equal(cudf::table const &lhs, cudf::table const &rhs) {
EXPECT_EQ(lhs.num_columns(), rhs.num_columns());
EXPECT_EQ(lhs.num_rows(), rhs.num_rows());
EXPECT_TRUE(
std::equal(lhs.begin(), lhs.end(), rhs.begin(),
[](gdf_column const *lhs_col, gdf_column const *rhs_col) {
return cudf::type_dispatcher(
lhs_col->dtype, column_equality{}, *lhs_col, *rhs_col);
}));
}
std::pair<cudf::table, gdf_column> gdf_solution(cudf::table const &input_keys,
bool ignore_null_keys) {
gdf_context context;
if (not ignore_null_keys) { // SQL
context.flag_groupby_include_nulls = true;
context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
} else { // PANDAS
context.flag_groupby_include_nulls = false;
context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
}
std::vector<int> groupby_col_indices;
for (cudf::size_type i = 0; i < input_keys.num_columns(); i++)
groupby_col_indices.push_back(i);
return gdf_group_by_without_aggregations(
input_keys, groupby_col_indices.size(), groupby_col_indices.data(),
&context);
}
inline void destroy_table(cudf::table *t) {
std::for_each(t->begin(), t->end(), [](gdf_column *col) {
gdf_column_free(col);
delete col;
});
}
// Runs groupby-without-aggregations on `keys` and verifies both outputs:
// the reordered key table must equal `sorted_keys` and the group start
// offsets must equal `column_offsets`.  `ignore_null_keys` selects PANDAS
// (true: null-keyed rows dropped) vs SQL (false: nulls form a group)
// semantics.  Frees all result allocations before returning.
void evaluate_test(column_wrapper<KeyType> keys,
                   column_wrapper<KeyType> sorted_keys,
                   column_wrapper<cudf::size_type> column_offsets,
                   bool ignore_null_keys = true) {
  using namespace cudf::test;
  cudf::table input_keys{keys.get()};
  cudf::table actual_keys_table;
  gdf_column column_offsets_output;
  std::tie(actual_keys_table, column_offsets_output) =
      gdf_solution(input_keys, ignore_null_keys);
  // Make sure any asynchronous device work (and errors) surface here.
  EXPECT_EQ(cudaSuccess, cudaDeviceSynchronize());
  cudf::table sorted_expected_keys{sorted_keys.get()};
  cudf::table expected_column_offsets{column_offsets.get()};
  // Wrap the single offsets column in a table so the same comparator works.
  cudf::table actual_column_offsets({&column_offsets_output});
  CUDF_EXPECT_NO_THROW(
      expect_tables_are_equal(actual_keys_table, sorted_expected_keys));
  CUDF_EXPECT_NO_THROW(expect_tables_are_equal(actual_column_offsets,
                                               expected_column_offsets));
  // The solution allocated these; the input wrappers clean up after
  // themselves.
  destroy_table(&actual_keys_table);
  gdf_column_free(&column_offsets_output);
}
};
using TestingTypes =
::testing::Types<int8_t, int32_t, int64_t, float, double, cudf::category,
cudf::date32, cudf::date64>;
TYPED_TEST_CASE(SingleColumnGroupby, TestingTypes);
TYPED_TEST(SingleColumnGroupby, OneGroupNoNullsPandasStyle) {
constexpr int size{10};
using T = typename SingleColumnGroupby<TypeParam>::Key;
T key{42};
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>(size, [key](auto index) { return key; }),
column_wrapper<T>(size, [key](auto index) { return key; }),
column_wrapper<cudf::size_type>({0}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, OneGroupNoNullsSqlStyle) {
constexpr int size{10};
using T = typename SingleColumnGroupby<TypeParam>::Key;
T key{42};
bool ignore_null_keys = false;
this->evaluate_test(
column_wrapper<T>(size, [key](auto index) { return key; }),
column_wrapper<T>(size, [key](auto index) { return key; }),
column_wrapper<cudf::size_type>({0}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, OneGroupEvenNullKeysPandasStyle) {
constexpr int size{10};
using T = typename SingleColumnGroupby<TypeParam>::Key;
T key{42};
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>(size, [key](auto index) { return key; },
[](auto index) { return index % 2; }),
column_wrapper<T>({T(key), T(key), T(key), T(key), T(key)},
[&](auto index) { return true; }),
column_wrapper<cudf::size_type>({0}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, OneGroupEvenNullKeysSqlStyle) {
constexpr int size{10};
using T = typename SingleColumnGroupby<TypeParam>::Key;
T key{42};
bool ignore_null_keys = false;
this->evaluate_test(column_wrapper<T>(size, [key](auto index) { return key; },
[](auto index) { return index % 2; }),
column_wrapper<T>({T(key), T(key), T(key), T(key), T(key),
T(0), T(0), T(0), T(0), T(0)},
[&](auto index) { return index < 5; }),
column_wrapper<cudf::size_type>({0, 5}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniquePandasStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<cudf::size_type>({0, 1, 2, 3, 4, 5, 6, 7}),
ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniqueSqlStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = false;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)}),
column_wrapper<cudf::size_type>({0, 1, 2, 3, 4, 5, 6, 7}),
ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniqueEvenKeysNullPandasStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = true;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)},
[](auto index) { return index % 2; }),
column_wrapper<T>({T(1), T(3), T(5), T(7)},
[](auto index) { return true; }),
column_wrapper<cudf::size_type>({0, 1, 2, 3}), ignore_null_keys);
}
TYPED_TEST(SingleColumnGroupby, EightKeysAllUniqueEvenKeysNullSqlStyle) {
using T = typename SingleColumnGroupby<TypeParam>::Key;
bool ignore_null_keys = false;
this->evaluate_test(
column_wrapper<T>({T(0), T(1), T(2), T(3), T(4), T(5), T(6), T(7)},
[](auto index) { return index % 2; }),
column_wrapper<T>({T(1), T(3), T(5), T(7), T(0), T(0), T(0), T(0)},
[](auto index) { return index < 4; }), /* */
column_wrapper<cudf::size_type>({0, 1, 2, 3, 4}), ignore_null_keys);
}
|
25ef0379f748ecdbf4620e3b6dee0e4bafa87693.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "poly_overlaps.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdio>
#include<algorithm>
using namespace std;
//##define CUDA_CHECK(condition)\
//
// do {
// hipError_t error = condition;
// if (error != hipSuccess) {
//
// }
// }
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
#define maxn 510
const double eps=1E-8;
// Three-way sign with tolerance: +1 if d > eps, -1 if d < -eps, else 0.
__device__ inline int sig(float d){
    if (d > eps) return 1;
    if (d < -eps) return -1;
    return 0;
}
// struct Point{
// double x,y; Point(){}
// Point(double x,double y):x(x),y(y){}
// bool operator==(const Point&p)const{
// return sig(x-p.x)==0&&sig(y-p.y)==0;
// }
// };
// Tolerant point equality: both coordinate deltas must be within eps of 0.
__device__ inline int point_eq(const float2 a, const float2 b) {
    const int same_x = (sig(a.x - b.x) == 0);
    const int same_y = (sig(a.y - b.y) == 0);
    return same_x && same_y;
}
// Exchange the two points in place (device-side replacement for std::swap).
__device__ inline void point_swap(float2 *a, float2 *b) {
    const float2 saved = *a;
    *a = *b;
    *b = saved;
}
// Reverse the points in the half-open range [first, last), mirroring
// std::reverse for device code.
__device__ inline void point_reverse(float2 *first, float2* last)
{
    if (first == last) return;  // empty range
    --last;                     // step onto the final element
    while (first < last) {
        point_swap(first, last);
        ++first;
        --last;
    }
}
// void point_reverse(Point* first, Point* last)
// {
// while ((first!=last)&&(first!=--last)) {
// point_swap (first,last);
// ++first;
// }
// }
// 2D cross product of vectors (a - o) and (b - o); positive when the turn
// o->a->b is counter-clockwise.
__device__ inline float cross(float2 o,float2 a,float2 b){
    const float ax = a.x - o.x, ay = a.y - o.y;
    const float bx = b.x - o.x, by = b.y - o.y;
    return ax * by - bx * ay;
}
// Signed area of polygon `ps` (n vertices) via the shoelace formula;
// positive for counter-clockwise vertex order.
// NOTE: writes ps[n] = ps[0] to close the ring, so the buffer must hold at
// least n + 1 entries.
__device__ inline float area(float2* ps,int n){
    ps[n]=ps[0];  // close the ring so edge (n-1, 0) is included
    float res=0;
    for(int i=0;i<n;i++){
        res+=ps[i].x*ps[i+1].y-ps[i].y*ps[i+1].x;
    }
    return res/2.0;
}
// Intersect line (a,b) with segment endpoints c, d.
// Returns 2 when c and d are both on line (a,b) (collinear), 0 when the
// lines are parallel with no single intersection, 1 otherwise with the
// intersection point written to `p`.
__device__ inline int lineCross(float2 a,float2 b,float2 c,float2 d,float2&p){
    float s1,s2;
    s1=cross(a,b,c);  // signed doubled areas locate c and d relative to a-b
    s2=cross(a,b,d);
    if(sig(s1)==0&&sig(s2)==0) return 2;
    if(sig(s2-s1)==0) return 0;
    // Interpolate between c and d proportionally to their signed distances.
    p.x=(c.x*s2-d.x*s1)/(s2-s1);
    p.y=(c.y*s2-d.y*s1)/(s2-s1);
    return 1;
}
//
//abp(a,b)
//,n1
// __device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b){
// // TODO: The static variable may be the reason, why single thread is ok, multiple threads are not work
// printf("polygon_cut, offset\n");
// static float2 pp[maxn];
// int m=0;p[n]=p[0];
// for(int i=0;i<n;i++){
// if(sig(cross(a,b,p[i]))>0) pp[m++]=p[i];
// if(sig(cross(a,b,p[i]))!=sig(cross(a,b,p[i+1])))
// lineCross(a,b,p[i],p[i+1],pp[m++]);
// }
// n=0;
// for(int i=0;i<m;i++)
// if(!i||!(point_eq(pp[i], pp[i-1])))
// p[n++]=pp[i];
// // while(n>1&&p[n-1]==p[0])n--;
// while(n>1&&point_eq(p[n-1], p[0]))n--;
// // int x = blockIdx.x * blockDim.x + threadIdx.x;
// // // corresponding to k
// // int y = blockIdx.y * blockDim.y + threadIdx.y;
// // int offset = x * 1 + y;
// // printf("polygon_cut, offset\n");
// }
// Clip polygon `p` (n vertices, modified in place) by the directed line
// a->b, keeping the part on the left of (a,b).  `pp` is caller-provided
// scratch space (at least maxn entries); passing it in replaces the former
// function-local `static` buffer, which was unsafe with concurrent threads.
// On return `n` holds the new vertex count; the polygon may degenerate to a
// single point (n == 1).
__device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b, float2* pp){
    int m = 0;
    p[n] = p[0];  // close the ring so edge (p[n-1], p[0]) is visited
    for (int i = 0; i < n; i++) {
        // Keep vertices strictly on the left side of a->b.
        if (sig(cross(a, b, p[i])) > 0) pp[m++] = p[i];
        // The edge (p[i], p[i+1]) crosses the cut line: emit the crossing.
        if (sig(cross(a, b, p[i])) != sig(cross(a, b, p[i + 1])))
            lineCross(a, b, p[i], p[i + 1], pp[m++]);
    }
    // Copy back, dropping consecutive duplicate points.
    n = 0;
    for (int i = 0; i < m; i++)
        if (!i || !(point_eq(pp[i], pp[i - 1])))
            p[n++] = pp[i];
    // Drop trailing points that duplicate the first one.
    while (n > 1 && point_eq(p[n - 1], p[0])) n--;
}
//--------------------------------//
//oabocd,o//
// Signed intersection area of triangles (o,a,b) and (o,c,d), where o is the
// origin.  Degenerate triangles contribute 0.  When exactly one triangle is
// negatively oriented the piece subtracts from the running total, which is
// what makes the triangle-decomposition in the polygon overload below sum
// to the true intersection area.
// (Removed: ~40 lines of dead commented-out code and an unused thread-index
// computation that served no purpose in a __device__ helper.)
__device__ inline float intersectArea(float2 a,float2 b,float2 c,float2 d){
    float2 o = make_float2(0, 0);
    int s1 = sig(cross(o, a, b));
    int s2 = sig(cross(o, c, d));
    if (s1 == 0 || s2 == 0) return 0.0f;  // degenerate triangle: no area
    // Normalise both triangles to counter-clockwise order before clipping.
    if (s1 == -1) point_swap(&a, &b);
    if (s2 == -1) point_swap(&c, &d);
    // Clip triangle (o,a,b) against the three edges of triangle (o,c,d).
    float2 p[10] = {o, a, b};
    int n = 3;
    float2 pp[maxn];  // scratch for polygon_cut (maxn entries of local memory)
    polygon_cut(p, n, o, c, pp);
    polygon_cut(p, n, c, d, pp);
    polygon_cut(p, n, d, o, pp);
    float res = fabs(area(p, n));
    if (s1 * s2 == -1) res = -res;  // opposite orientations: subtract
    return res;
}
//
// TODO: here changed the input, this need to be debug
// Intersection area of simple polygons ps1 (n1 vertices) and ps2 (n2
// vertices).  Both polygons are normalised to counter-clockwise order IN
// PLACE, then the area is accumulated as the sum of signed triangle-pair
// intersections against the origin.  Both buffers must hold at least
// n + 1 entries because the rings are closed by writing ps[n] = ps[0].
// The result is assumed non-negative for valid (simple) input polygons.
// (Removed: unused thread-index computation and dead commented-out code.)
__device__ inline float intersectArea(float2*ps1,int n1,float2*ps2,int n2){
    if (area(ps1, n1) < 0) point_reverse(ps1, ps1 + n1);
    if (area(ps2, n2) < 0) point_reverse(ps2, ps2 + n2);
    ps1[n1] = ps1[0];  // close both rings
    ps2[n2] = ps2[0];
    float res = 0;
    for (int i = 0; i < n1; i++) {
        for (int j = 0; j < n2; j++) {
            res += intersectArea(ps1[i], ps1[i + 1], ps2[j], ps2[j + 1]);
        }
    }
    return res;
}
//__device__ inline double iou_poly(vector<double> p, vector<double> q) {
// Point ps1[maxn],ps2[maxn];
// int n1 = 4;
// int n2 = 4;
// for (int i = 0; i < 4; i++) {
// ps1[i].x = p[i * 2];
// ps1[i].y = p[i * 2 + 1];
//
// ps2[i].x = q[i * 2];
// ps2[i].y = q[i * 2 + 1];
// }
// double inter_area = intersectArea(ps1, n1, ps2, n2);
// double union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area;
// double iou = inter_area / union_area;
//
//// cout << "inter_area:" << inter_area << endl;
//// cout << "union_area:" << union_area << endl;
//// cout << "iou:" << iou << endl;
//
// return iou;
//}
// Expand a rotated box dbox = {x_ctr, y_ctr, w, h, angle (radians)} into its
// four corner points ps[0..3].  Uses single-precision math (cosf/sinf and
// float literals) to avoid the implicit double promotion the previous
// cos/sin + 2.0 literals caused in device code; results may differ from the
// double-promoted version by at most a few ULPs.
__device__ inline void RotBox2Poly(float const * const dbox, float2 * ps) {
    const float cs = cosf(dbox[4]);
    const float ss = sinf(dbox[4]);
    const float half_w = 0.5f * dbox[2];
    const float half_h = 0.5f * dbox[3];
    const float x_ctr = dbox[0];
    const float y_ctr = dbox[1];
    // Rotate the four axis-aligned corners (+-half_w, +-half_h) and shift by
    // the centre.  Corner order matches the original implementation.
    ps[0].x = x_ctr + cs * half_w + ss * half_h;
    ps[1].x = x_ctr + cs * half_w - ss * half_h;
    ps[2].x = x_ctr - cs * half_w - ss * half_h;
    ps[3].x = x_ctr - cs * half_w + ss * half_h;
    ps[0].y = y_ctr + ss * half_w - cs * half_h;
    ps[1].y = y_ctr + ss * half_w + cs * half_h;
    ps[2].y = y_ctr - ss * half_w + cs * half_h;
    ps[3].y = y_ctr - ss * half_w - cs * half_h;
}
// IoU of two rotated boxes, each given as 5 floats {x_ctr, y_ctr, w, h,
// angle}.  When the union area is 0 (both boxes degenerate) the +1
// smoothing keeps the division finite.
// NOTE: the corner buffers are sized maxn (=510) even though only 4 corners
// are produced — intersectArea writes ps[n] to close the ring, so they must
// be larger than 4; maxn is generous but matches the rest of the file.
// (Removed: dead commented-out printf debugging.)
__device__ inline float devPolyIoU(float const * const dbbox1, float const * const dbbox2) {
    float2 ps1[maxn], ps2[maxn];
    int n1 = 4;
    int n2 = 4;
    RotBox2Poly(dbbox1, ps1);
    RotBox2Poly(dbbox2, ps2);
    float inter_area = intersectArea(ps1, n1, ps2, n2);
    float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area;
    float iou = 0;
    if (union_area == 0) {
        iou = (inter_area + 1) / (union_area + 1);  // degenerate: smoothed
    } else {
        iou = inter_area / union_area;
    }
    return iou;
}
// One thread per (box, query-box) pair: thread (x, y) compares box x of
// dev_boxes (N boxes, 5 floats each) against query box y of
// dev_query_boxes (K boxes) and writes the IoU into the row-major
// N x K matrix dev_overlaps.
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
                           const float * dev_query_boxes, float* dev_overlaps) {
    const int box_idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int query_idx = blockIdx.y * blockDim.y + threadIdx.y;
    // The grid is rounded up, so threads past the edges do nothing.
    if (box_idx >= N || query_idx >= K) return;
    dev_overlaps[box_idx * K + query_idx] =
        devPolyIoU(dev_boxes + box_idx * 5, dev_query_boxes + query_idx * 5);
}
// Make `device_id` the active GPU for this host thread.  A no-op when it is
// already current; otherwise hipSetDevice is issued before any Get-style
// calls that would initialise the wrong device.
void _set_device(int device_id) {
    int current_device;
    CUDA_CHECK(hipGetDevice(&current_device));
    if (current_device != device_id) {
        CUDA_CHECK(hipSetDevice(device_id));
    }
}
// Compute the n x k rotated-box IoU matrix on GPU `device_id`.
// `boxes` holds n boxes and `query_boxes` holds k boxes, 5 floats each
// (x_ctr, y_ctr, w, h, angle); `overlaps` must hold n*k floats and is
// filled row-major (boxes x query_boxes).
// Fixes vs. previous version: removed the dead `if (true){}` statement and
// added a hipGetLastError() check so bad launch configurations are reported
// instead of silently producing garbage.
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
    _set_device(device_id);
    float* overlaps_dev = NULL;
    float* boxes_dev = NULL;
    float* query_boxes_dev = NULL;
    // Stage both box sets on the device.
    CUDA_CHECK(hipMalloc(&boxes_dev, n * 5 * sizeof(float)));
    CUDA_CHECK(hipMemcpy(boxes_dev, boxes, n * 5 * sizeof(float),
                         hipMemcpyHostToDevice));
    CUDA_CHECK(hipMalloc(&query_boxes_dev, k * 5 * sizeof(float)));
    CUDA_CHECK(hipMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float),
                         hipMemcpyHostToDevice));
    CUDA_CHECK(hipMalloc(&overlaps_dev, n * k * sizeof(float)));
    // 32x32 threads per block; round the grid up so every pair is covered.
    dim3 blocks(DIVUP(n, 32), DIVUP(k, 32));
    dim3 threads(32, 32);
    hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, 0, n, k,
                       boxes_dev,
                       query_boxes_dev,
                       overlaps_dev);
    CUDA_CHECK(hipGetLastError());  // surface launch-configuration errors
    // This blocking copy also synchronises with the kernel.
    CUDA_CHECK(hipMemcpy(overlaps, overlaps_dev, n * k * sizeof(float),
                         hipMemcpyDeviceToHost));
    CUDA_CHECK(hipFree(overlaps_dev));
    CUDA_CHECK(hipFree(boxes_dev));
    CUDA_CHECK(hipFree(query_boxes_dev));
}
| 25ef0379f748ecdbf4620e3b6dee0e4bafa87693.cu |
#include "poly_overlaps.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdio>
#include<algorithm>
using namespace std;
//##define CUDA_CHECK(condition)\
//
// do {
// cudaError_t error = condition;
// if (error != cudaSuccess) {
//
// }
// }
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
#define maxn 510
const double eps=1E-8;
__device__ inline int sig(float d){
return(d>eps)-(d<-eps);
}
// struct Point{
// double x,y; Point(){}
// Point(double x,double y):x(x),y(y){}
// bool operator==(const Point&p)const{
// return sig(x-p.x)==0&&sig(y-p.y)==0;
// }
// };
__device__ inline int point_eq(const float2 a, const float2 b) {
return (sig(a.x - b.x) == 0) && (sig(a.y - b.y)==0);
}
__device__ inline void point_swap(float2 *a, float2 *b) {
float2 temp = *a;
*a = *b;
*b = temp;
}
__device__ inline void point_reverse(float2 *first, float2* last)
{
while ((first!=last)&&(first!=--last)) {
point_swap (first,last);
++first;
}
}
// void point_reverse(Point* first, Point* last)
// {
// while ((first!=last)&&(first!=--last)) {
// point_swap (first,last);
// ++first;
// }
// }
__device__ inline float cross(float2 o,float2 a,float2 b){ //叉积
return(a.x-o.x)*(b.y-o.y)-(b.x-o.x)*(a.y-o.y);
}
__device__ inline float area(float2* ps,int n){
ps[n]=ps[0];
float res=0;
for(int i=0;i<n;i++){
res+=ps[i].x*ps[i+1].y-ps[i].y*ps[i+1].x;
}
return res/2.0;
}
__device__ inline int lineCross(float2 a,float2 b,float2 c,float2 d,float2&p){
float s1,s2;
s1=cross(a,b,c);
s2=cross(a,b,d);
if(sig(s1)==0&&sig(s2)==0) return 2;
if(sig(s2-s1)==0) return 0;
p.x=(c.x*s2-d.x*s1)/(s2-s1);
p.y=(c.y*s2-d.y*s1)/(s2-s1);
return 1;
}
//多边形切割
//用直线ab切割多边形p,切割后的在向量(a,b)的左侧,并原地保存切割结果
//如果退化为一个点,也会返回去,此时n为1
// __device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b){
// // TODO: The static variable may be the reason, why single thread is ok, multiple threads are not work
// printf("polygon_cut, offset\n");
// static float2 pp[maxn];
// int m=0;p[n]=p[0];
// for(int i=0;i<n;i++){
// if(sig(cross(a,b,p[i]))>0) pp[m++]=p[i];
// if(sig(cross(a,b,p[i]))!=sig(cross(a,b,p[i+1])))
// lineCross(a,b,p[i],p[i+1],pp[m++]);
// }
// n=0;
// for(int i=0;i<m;i++)
// if(!i||!(point_eq(pp[i], pp[i-1])))
// p[n++]=pp[i];
// // while(n>1&&p[n-1]==p[0])n--;
// while(n>1&&point_eq(p[n-1], p[0]))n--;
// // int x = blockIdx.x * blockDim.x + threadIdx.x;
// // // corresponding to k
// // int y = blockIdx.y * blockDim.y + threadIdx.y;
// // int offset = x * 1 + y;
// // printf("polygon_cut, offset\n");
// }
__device__ inline void polygon_cut(float2*p,int&n,float2 a,float2 b, float2* pp){
// TODO: The static variable may be the reason, why single thread is ok, multiple threads are not work
// printf("polygon_cut, offset\n");
// static float2 pp[maxn];
int m=0;p[n]=p[0];
for(int i=0;i<n;i++){
if(sig(cross(a,b,p[i]))>0) pp[m++]=p[i];
if(sig(cross(a,b,p[i]))!=sig(cross(a,b,p[i+1])))
lineCross(a,b,p[i],p[i+1],pp[m++]);
}
n=0;
for(int i=0;i<m;i++)
if(!i||!(point_eq(pp[i], pp[i-1])))
p[n++]=pp[i];
// while(n>1&&p[n-1]==p[0])n--;
while(n>1&&point_eq(p[n-1], p[0]))n--;
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// // corresponding to k
// int y = blockIdx.y * blockDim.y + threadIdx.y;
// int offset = x * 1 + y;
// printf("polygon_cut, offset\n");
}
//---------------华丽的分隔线-----------------//
//返回三角形oab和三角形ocd的有向交面积,o是原点//
__device__ inline float intersectArea(float2 a,float2 b,float2 c,float2 d){
float2 o = make_float2(0,0);
int s1=sig(cross(o,a,b));
int s2=sig(cross(o,c,d));
if(s1==0||s2==0)return 0.0;//退化,面积为0
// if(s1==-1) swap(a,b);
// if(s2==-1) swap(c,d);
// printf("before swap\n");
// printf("a.x %f, a.y %f\n", a.x, a.y);
// printf("b.x %f, b.y %f\n", b.x, b.y);
if(s1 == -1) point_swap(&a, &b);
// printf("a.x %f, a.y %f\n", a.x, a.y);
// printf("b.x %f, b.y %f\n", b.x, b.y);
// printf("after swap\n");
if(s2 == -1) point_swap(&c, &d);
float2 p[10]={o,a,b};
int n=3;
// // manually implement polygon_cut(p, n, a, b)
// float2 pp[maxn];
// // polygon_cut(p, n, o, c)
// int m=0;p[n]=p[0];
// for(int i=0;i<n;i++){
// if(sig(cross(o,c,p[i]))>0) pp[m++]=p[i];
// if(sig(cross(o,c,p[i]))!=sig(cross(o,c,p[i+1])))
// lineCross(o,c,p[i],p[i+1],pp[m++]);
// }
// n=0;
// for(int i=0;i<m;i++)
// if(!i||!(point_eq(pp[i], pp[i-1])))
// p[n++]=pp[i];
// while(n>1&&point_eq(p[n-1], p[0]))n--;
// // polygon_cut(p, n, c, d)
// m=0;p[n]=p[0];
// for(int i=0;i<n;i++){
// if(sig(cross(c,d,p[i]))>0) pp[m++]=p[i];
// if(sig(cross(c,d,p[i]))!=sig(cross(c,d,p[i+1])))
// lineCross(c,d,p[i],p[i+1],pp[m++]);
// }
// n=0;
// for(int i=0;i<m;i++)
// if(!i||!(point_eq(pp[i], pp[i-1])))
// p[n++]=pp[i];
// while(n>1&&point_eq(p[n-1], p[0]))n--;
// // polygon_cut(p, n, d, o)
// m=0;p[n]=p[0];
// for(int i=0;i<n;i++){
// if(sig(cross(d,o,p[i]))>0) pp[m++]=p[i];
// if(sig(cross(d,o,p[i]))!=sig(cross(d,o,p[i+1])))
// lineCross(d,o,p[i],p[i+1],pp[m++]);
// }
// n=0;
// for(int i=0;i<m;i++)
// if(!i||!(point_eq(pp[i], pp[i-1])))
// p[n++]=pp[i];
// while(n>1&&point_eq(p[n-1], p[0]))n--;
float2 pp[maxn];
polygon_cut(p,n,o,c,pp);
polygon_cut(p,n,c,d,pp);
polygon_cut(p,n,d,o,pp);
float res=fabs(area(p,n));
int x = blockIdx.x * blockDim.x + threadIdx.x;
// corresponding to k
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x * 1 + y;
// printf("intersectArea2, offset: %d, %f, %f, %f, %f, %f, %f, %f, %f, res: %f\n", offset, a.x, a.y, b.x, b.y, c.x, c.y, d.x, d.y, res);
if(s1*s2==-1) res=-res;return res;
}
//求两多边形的交面积
// TODO: here changed the input, this need to be debug
__device__ inline float intersectArea(float2*ps1,int n1,float2*ps2,int n2){
int x = blockIdx.x * blockDim.x + threadIdx.x;
// corresponding to k
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x * 1 + y;
if(area(ps1,n1)<0) point_reverse(ps1,ps1+n1);
if(area(ps2,n2)<0) point_reverse(ps2,ps2+n2);
ps1[n1]=ps1[0];
ps2[n2]=ps2[0];
float res=0;
for(int i=0;i<n1;i++){
for(int j=0;j<n2;j++){
// printf("offset: %d, %f, %f, %f, %f, %f, %f, %f, %f addArea: %f \n",
// offset, ps1[i].x, ps1[i].y, ps1[i + 1].x, ps1[i + 1].y, ps2[j].x, ps2[j].y,
// ps2[j + 1].x, ps2[j + 1].y, intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1]));
// float2 a = ps1[i];
// float2 b = ps1[i+1];
// float2 c = ps2[j];
// float2 d = ps2[j+1];
// res+=intersectArea2(a,b,c,d);
res+=intersectArea(ps1[i],ps1[i+1],ps2[j],ps2[j+1]);
}
}
return res;//assumeresispositive!
}
//__device__ inline double iou_poly(vector<double> p, vector<double> q) {
// Point ps1[maxn],ps2[maxn];
// int n1 = 4;
// int n2 = 4;
// for (int i = 0; i < 4; i++) {
// ps1[i].x = p[i * 2];
// ps1[i].y = p[i * 2 + 1];
//
// ps2[i].x = q[i * 2];
// ps2[i].y = q[i * 2 + 1];
// }
// double inter_area = intersectArea(ps1, n1, ps2, n2);
// double union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area;
// double iou = inter_area / union_area;
//
//// cout << "inter_area:" << inter_area << endl;
//// cout << "union_area:" << union_area << endl;
//// cout << "iou:" << iou << endl;
//
// return iou;
//}
// Expand a rotated box dbox = {x_ctr, y_ctr, w, h, angle (radians)} into its
// four corner points ps[0..3].  Uses single-precision math (cosf/sinf and
// float literals) to avoid the implicit double promotion the previous
// cos/sin + 2.0 literals caused in device code; results may differ from the
// double-promoted version by at most a few ULPs.
__device__ inline void RotBox2Poly(float const * const dbox, float2 * ps) {
    const float cs = cosf(dbox[4]);
    const float ss = sinf(dbox[4]);
    const float half_w = 0.5f * dbox[2];
    const float half_h = 0.5f * dbox[3];
    const float x_ctr = dbox[0];
    const float y_ctr = dbox[1];
    // Rotate the four axis-aligned corners (+-half_w, +-half_h) and shift by
    // the centre.  Corner order matches the original implementation.
    ps[0].x = x_ctr + cs * half_w + ss * half_h;
    ps[1].x = x_ctr + cs * half_w - ss * half_h;
    ps[2].x = x_ctr - cs * half_w - ss * half_h;
    ps[3].x = x_ctr - cs * half_w + ss * half_h;
    ps[0].y = y_ctr + ss * half_w - cs * half_h;
    ps[1].y = y_ctr + ss * half_w + cs * half_h;
    ps[2].y = y_ctr - ss * half_w + cs * half_h;
    ps[3].y = y_ctr - ss * half_w - cs * half_h;
}
__device__ inline float devPolyIoU(float const * const dbbox1, float const * const dbbox2) {
float2 ps1[maxn], ps2[maxn];
int n1 = 4;
int n2 = 4;
RotBox2Poly(dbbox1, ps1);
RotBox2Poly(dbbox2, ps2);
// printf("ps1: %f, %f, %f, %f, %f, %f, %f, %f\n", ps1[0].x, ps1[0].y, ps1[1].x, ps1[1].y, ps1[2].x, ps1[2].y, ps1[3].x, ps1[3].y);
// printf("ps2: %f, %f, %f, %f, %f, %f, %f, %f\n", ps2[0].x, ps2[0].y, ps2[1].x, ps2[1].y, ps2[2].x, ps2[2].y, ps2[3].x, ps2[3].y);
float inter_area = intersectArea(ps1, n1, ps2, n2);
//printf("inter_area: %f \n", inter_area);
float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area;
//printf("before union_area\n");
//printf("union_area: %f \n", union_area);
float iou = 0;
if (union_area == 0) {
iou = (inter_area + 1) / (union_area + 1);
} else {
iou = inter_area / union_area;
}
// printf("iou: %f \n", iou);
return iou;
}
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
// const int col_start = blockIdx.y;
// const int row_start = blockIdx.x;
// corresponding to n
int x = blockIdx.x * blockDim.x + threadIdx.x;
// corresponding to k
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < N) && (y < K)) {
int offset = x * K + y;
//printf
// printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_boxes + x*5)[0],
// (dev_boxes + x*5)[1], (dev_boxes + x*5)[2], (dev_boxes + x*5)[3],
// (dev_boxes + x*5)[4] );
// printf("offset: %d dbbox: %f %f %f %f %f\n", offset, (dev_query_boxes + y*5)[0],
// (dev_query_boxes + y*5)[1], (dev_query_boxes + y*5)[2], (dev_query_boxes + y*5)[3],
// (dev_query_boxes + y*5)[4] );
dev_overlaps[offset] = devPolyIoU(dev_boxes + x * 5, dev_query_boxes + y * 5);
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
// Compute the n x k rotated-box IoU matrix on GPU `device_id`.
// `boxes` holds n boxes and `query_boxes` holds k boxes, 5 floats each
// (x_ctr, y_ctr, w, h, angle); `overlaps` must hold n*k floats and is
// filled row-major (boxes x query_boxes).
// Fixes vs. previous version: removed the dead `if (true){}` statement and
// added a cudaGetLastError() check so bad launch configurations are
// reported instead of silently producing garbage.
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
    _set_device(device_id);
    float* overlaps_dev = NULL;
    float* boxes_dev = NULL;
    float* query_boxes_dev = NULL;
    // Stage both box sets on the device.
    CUDA_CHECK(cudaMalloc(&boxes_dev, n * 5 * sizeof(float)));
    CUDA_CHECK(cudaMemcpy(boxes_dev, boxes, n * 5 * sizeof(float),
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMalloc(&query_boxes_dev, k * 5 * sizeof(float)));
    CUDA_CHECK(cudaMemcpy(query_boxes_dev, query_boxes, k * 5 * sizeof(float),
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMalloc(&overlaps_dev, n * k * sizeof(float)));
    // 32x32 threads per block; round the grid up so every pair is covered.
    dim3 blocks(DIVUP(n, 32), DIVUP(k, 32));
    dim3 threads(32, 32);
    overlaps_kernel<<<blocks, threads>>>(n, k,
                                         boxes_dev,
                                         query_boxes_dev,
                                         overlaps_dev);
    CUDA_CHECK(cudaGetLastError());  // surface launch-configuration errors
    // This blocking copy also synchronises with the kernel.
    CUDA_CHECK(cudaMemcpy(overlaps, overlaps_dev, n * k * sizeof(float),
                          cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(overlaps_dev));
    CUDA_CHECK(cudaFree(boxes_dev));
    CUDA_CHECK(cudaFree(query_boxes_dev));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.