hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
67e4262a20ad77c4d595af79bf7bcc46e0b7df40.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
#include <xmmintrin.h>
// includes, system
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// includes, CUDA
#include <builtin_types.h>
#include <hip/hip_fp16.h>
// Complex data type
typedef half2 halfComplex;
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *,
int, float);
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
//#define SIGNAL_SIZE 50
//#define SIGNAL_SIZE 1024
//#define SIGNAL_SIZE 8192
//#define SIGNAL_SIZE 256
#define SIGNAL_SIZE 0x400000
#define BATCH 16
halfComplex h_signal_half[SIGNAL_SIZE * BATCH];
halfComplex h_convolved_signal_half[SIGNAL_SIZE * BATCH];
//Complex h_signal[SIGNAL_SIZE * BATCH];
//Complex h_convolved_signal[SIGNAL_SIZE * BATCH];
int16_t h_signal16[SIGNAL_SIZE*2*BATCH];
int16_t h_convolved_signal16[SIGNAL_SIZE*2 * BATCH];
double PCFreq = 0.0;
__int64 CounterStart = 0;
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li))
printf("QueryPerformanceFrequency failed!\n");
PCFreq = (double)(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return (double)(li.QuadPart - CounterStart) / PCFreq;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) { runTest(argc, argv); }
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv) {
printf("[simpleCUFFT] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
// Complex *h_signal =
// reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE));
int mem_size = sizeof(hipfftComplex) * SIGNAL_SIZE * BATCH;
int mem_size_half = sizeof(hipfftComplex) * (SIGNAL_SIZE/2) * BATCH;
// host arrays
Complex *h_PinnedSignal, *h_PinnedConvolvedSignal;
halfComplex* h_PinnedSignal_half, * h_PinnedConvolvedSignal_half;
// allocate and initialize
checkCudaErrors(hipHostMalloc((void**)&h_PinnedSignal, mem_size)); // host pinned
checkCudaErrors(hipHostMalloc((void**)&h_PinnedConvolvedSignal, mem_size)); // host pinned
checkCudaErrors(hipHostMalloc((void**)&h_PinnedSignal_half, mem_size_half)); // host pinned
checkCudaErrors(hipHostMalloc((void**)&h_PinnedConvolvedSignal_half, mem_size_half)); // host pinned
// Initialize the memory for the signal
halfComplex* p_half;
p_half = (halfComplex*)h_signal16;
for (unsigned int i = 0; i < SIGNAL_SIZE*2*BATCH; i+=2) {
h_signal16[i] = 0;
h_signal16[i+1] = 0;
}
for (unsigned int i = 0; i < BATCH * 2; i += 2) {
h_signal16[i] = i + 1;
h_signal16[i + 1] = 0;
}
/*
for (unsigned int i = 0; i < SIGNAL_SIZE*BATCH; i++) {
h_signal[i].x = p_half[i].x;
h_signal[i].y = p_half[i].y;
}
*/
StartCounter();
// Initialize the memory for the signal
#pragma loop(hint_parallel(0))
for (unsigned int i = 0; i < SIGNAL_SIZE*BATCH; ++i) {
//h_signal[i].x = rand() / static_cast<float>(RAND_MAX);
//h_signal[i].y = 0;
// h_signal_half[i].x = (half)i;
// h_signal_half[i].y = (half)0;
//h_signal[i].x = h_signal_half[i].x;
//h_signal[i].y = h_signal_half[i].y;
h_PinnedSignal_half[i].x = h_signal16[2*i];
h_PinnedSignal_half[i].y = h_signal16[2*i+1];
}
float xx = h_PinnedSignal_half[0].x;
xx/=32768;
h_PinnedSignal_half[0].x = xx;
float yy = h_PinnedSignal_half[0].x;
double copytime = GetCounter();
printf("----- Int2float time %ld Bytes is: %0.3f milliseconds \n", mem_size, copytime);
printf("--------------------------------------------- \n");
// Allocate device memory for signal
halfComplex *d_signal;
halfComplex* r_signal;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_signal), mem_size_half));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&r_signal), mem_size_half));
StartCounter();
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_signal, h_signal_half, mem_size_half, hipMemcpyHostToDevice));
copytime = GetCounter();
printf("---- Copy time %ld Bytes is: %0.3f milliseconds \n", mem_size_half, copytime);
printf("--------------------------------------------- \n");
StartCounter();
// Copy host memory to device
//checkCudaErrors(hipMemcpy(d_signal, h_PinnedSignal_half, mem_size_half, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_signal, p_half, mem_size_half, hipMemcpyHostToDevice));
copytime = GetCounter();
printf("---- Pinned Memory Copy time %ld Bytes is: %0.3f milliseconds \n", mem_size, copytime);
printf("--------------------------------------------- \n");
// CUFFT plan simple API
hipfftHandle plan;
//checkCudaErrors(hipfftPlan1d(&plan, SIGNAL_SIZE, HIPFFT_C2C, 1));
checkCudaErrors(hipfftCreate(&plan));
int rank = 1;
//long long int n = 65536*8*2*4;// 0x1000000;// SIGNAL_SIZE;
/*
long long int n = SIGNAL_SIZE/2;
long long int inembed[] = { 0 };
long long int istride = 1;
long long int idist = n;
hipDataType inputtype = HIP_C_16F;
long long int onembed[] = { 0 };
long long int ostride = 1;
long long int odist = n;
hipDataType outputtype = HIP_C_16F;
*/
long long int n = SIGNAL_SIZE / 2;
long long int inembed[] = { 0 };
long long int istride = 2;
long long int idist = 1;
hipDataType inputtype = HIP_C_16F;
long long int onembed[] = { 0 };
long long int ostride = 1;
long long int odist = n;
hipDataType outputtype = HIP_C_16F;
long long int batch = BATCH;// 1;
size_t workSize;
hipDataType executiontype = HIP_C_16F;
checkCudaErrors(cufftXtMakePlanMany(plan, rank, &n,
inembed, istride, idist, inputtype,
onembed, ostride, odist, outputtype,
batch, &workSize,
executiontype));
// Transform signal and kernel
printf("Transforming signal cufftXtExec\n");
// timer init
hipEvent_t start, stop;
float gpuTime = 0.0f;
hipEventCreate(&start, 0);
hipEventCreate(&stop, 0);
hipEventRecord(start, 0);
hipEventSynchronize(start);
StartCounter();
checkCudaErrors(cufftXtExec(plan, d_signal, r_signal, HIPFFT_FORWARD));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
double ttime = GetCounter();
hipEventElapsedTime(&gpuTime, start, stop);
printf("---- time: %.10f milliseconds\n", gpuTime);
printf("---- Execution time is: %0.3f milliseconds \n", ttime);
printf("--------------------------------------------- \n");
// Copy device memory to host
checkCudaErrors(hipMemcpy(h_convolved_signal_half, r_signal, mem_size_half,
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_signal_half, d_signal, mem_size_half,
hipMemcpyDeviceToHost));
for (int ii = 0; ii < SIGNAL_SIZE; ii++) {
h_convolved_signal[ii].x = h_convolved_signal_half[ii].x;
h_convolved_signal[ii].y = h_convolved_signal_half[ii].y;
}
// Check if kernel execution generated and error
getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
printf("Transforming signal back hipfftExecC2C\n");
checkCudaErrors(cufftXtExec(plan, r_signal, d_signal, HIPFFT_BACKWARD));
// Copy device memory to host
checkCudaErrors(hipMemcpy(h_convolved_signal_half, d_signal, mem_size_half,
hipMemcpyDeviceToHost));
// Destroy CUFFT context
checkCudaErrors(hipfftDestroy(plan));
// cleanup memory
checkCudaErrors(hipFree(d_signal));
checkCudaErrors(hipFree(r_signal));
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
/*
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) {
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s) {
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) {
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b,
int size, float scale) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads) {
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
}
*/ | 67e4262a20ad77c4d595af79bf7bcc46e0b7df40.cu | /**
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
#include <xmmintrin.h>
// includes, system
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// includes, CUDA
#include <builtin_types.h>
#include <cuda_fp16.h>
// Complex data type
typedef half2 halfComplex;
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *,
int, float);
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
//#define SIGNAL_SIZE 50
//#define SIGNAL_SIZE 1024
//#define SIGNAL_SIZE 8192
//#define SIGNAL_SIZE 256
#define SIGNAL_SIZE 0x400000
#define BATCH 16
halfComplex h_signal_half[SIGNAL_SIZE * BATCH];
halfComplex h_convolved_signal_half[SIGNAL_SIZE * BATCH];
//Complex h_signal[SIGNAL_SIZE * BATCH];
//Complex h_convolved_signal[SIGNAL_SIZE * BATCH];
int16_t h_signal16[SIGNAL_SIZE*2*BATCH];
int16_t h_convolved_signal16[SIGNAL_SIZE*2 * BATCH];
double PCFreq = 0.0;
__int64 CounterStart = 0;
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li))
printf("QueryPerformanceFrequency failed!\n");
PCFreq = (double)(li.QuadPart) / 1000.0;
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return (double)(li.QuadPart - CounterStart) / PCFreq;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) { runTest(argc, argv); }
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char **argv) {
printf("[simpleCUFFT] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
// Complex *h_signal =
// reinterpret_cast<Complex *>(malloc(sizeof(Complex) * SIGNAL_SIZE));
int mem_size = sizeof(cufftComplex) * SIGNAL_SIZE * BATCH;
int mem_size_half = sizeof(cufftComplex) * (SIGNAL_SIZE/2) * BATCH;
// host arrays
Complex *h_PinnedSignal, *h_PinnedConvolvedSignal;
halfComplex* h_PinnedSignal_half, * h_PinnedConvolvedSignal_half;
// allocate and initialize
checkCudaErrors(cudaMallocHost((void**)&h_PinnedSignal, mem_size)); // host pinned
checkCudaErrors(cudaMallocHost((void**)&h_PinnedConvolvedSignal, mem_size)); // host pinned
checkCudaErrors(cudaMallocHost((void**)&h_PinnedSignal_half, mem_size_half)); // host pinned
checkCudaErrors(cudaMallocHost((void**)&h_PinnedConvolvedSignal_half, mem_size_half)); // host pinned
// Initialize the memory for the signal
halfComplex* p_half;
p_half = (halfComplex*)h_signal16;
for (unsigned int i = 0; i < SIGNAL_SIZE*2*BATCH; i+=2) {
h_signal16[i] = 0;
h_signal16[i+1] = 0;
}
for (unsigned int i = 0; i < BATCH * 2; i += 2) {
h_signal16[i] = i + 1;
h_signal16[i + 1] = 0;
}
/*
for (unsigned int i = 0; i < SIGNAL_SIZE*BATCH; i++) {
h_signal[i].x = p_half[i].x;
h_signal[i].y = p_half[i].y;
}
*/
StartCounter();
// Initialize the memory for the signal
#pragma loop(hint_parallel(0))
for (unsigned int i = 0; i < SIGNAL_SIZE*BATCH; ++i) {
//h_signal[i].x = rand() / static_cast<float>(RAND_MAX);
//h_signal[i].y = 0;
// h_signal_half[i].x = (half)i;
// h_signal_half[i].y = (half)0;
//h_signal[i].x = h_signal_half[i].x;
//h_signal[i].y = h_signal_half[i].y;
h_PinnedSignal_half[i].x = h_signal16[2*i];
h_PinnedSignal_half[i].y = h_signal16[2*i+1];
}
float xx = h_PinnedSignal_half[0].x;
xx/=32768;
h_PinnedSignal_half[0].x = xx;
float yy = h_PinnedSignal_half[0].x;
double copytime = GetCounter();
printf("----- Int2float time %ld Bytes is: %0.3f milliseconds \n", mem_size, copytime);
printf("--------------------------------------------- \n");
// Allocate device memory for signal
halfComplex *d_signal;
halfComplex* r_signal;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_signal), mem_size_half));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&r_signal), mem_size_half));
StartCounter();
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_signal, h_signal_half, mem_size_half, cudaMemcpyHostToDevice));
copytime = GetCounter();
printf("---- Copy time %ld Bytes is: %0.3f milliseconds \n", mem_size_half, copytime);
printf("--------------------------------------------- \n");
StartCounter();
// Copy host memory to device
//checkCudaErrors(cudaMemcpy(d_signal, h_PinnedSignal_half, mem_size_half, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_signal, p_half, mem_size_half, cudaMemcpyHostToDevice));
copytime = GetCounter();
printf("---- Pinned Memory Copy time %ld Bytes is: %0.3f milliseconds \n", mem_size, copytime);
printf("--------------------------------------------- \n");
// CUFFT plan simple API
cufftHandle plan;
//checkCudaErrors(cufftPlan1d(&plan, SIGNAL_SIZE, CUFFT_C2C, 1));
checkCudaErrors(cufftCreate(&plan));
int rank = 1;
//long long int n = 65536*8*2*4;// 0x1000000;// SIGNAL_SIZE;
/*
long long int n = SIGNAL_SIZE/2;
long long int inembed[] = { 0 };
long long int istride = 1;
long long int idist = n;
cudaDataType inputtype = CUDA_C_16F;
long long int onembed[] = { 0 };
long long int ostride = 1;
long long int odist = n;
cudaDataType outputtype = CUDA_C_16F;
*/
long long int n = SIGNAL_SIZE / 2;
long long int inembed[] = { 0 };
long long int istride = 2;
long long int idist = 1;
cudaDataType inputtype = CUDA_C_16F;
long long int onembed[] = { 0 };
long long int ostride = 1;
long long int odist = n;
cudaDataType outputtype = CUDA_C_16F;
long long int batch = BATCH;// 1;
size_t workSize;
cudaDataType executiontype = CUDA_C_16F;
checkCudaErrors(cufftXtMakePlanMany(plan, rank, &n,
inembed, istride, idist, inputtype,
onembed, ostride, odist, outputtype,
batch, &workSize,
executiontype));
// Transform signal and kernel
printf("Transforming signal cufftXtExec\n");
// timer init
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate(&start, 0);
cudaEventCreate(&stop, 0);
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
StartCounter();
checkCudaErrors(cufftXtExec(plan, d_signal, r_signal, CUFFT_FORWARD));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
double ttime = GetCounter();
cudaEventElapsedTime(&gpuTime, start, stop);
printf("---- time: %.10f milliseconds\n", gpuTime);
printf("---- Execution time is: %0.3f milliseconds \n", ttime);
printf("--------------------------------------------- \n");
// Copy device memory to host
checkCudaErrors(cudaMemcpy(h_convolved_signal_half, r_signal, mem_size_half,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_signal_half, d_signal, mem_size_half,
cudaMemcpyDeviceToHost));
for (int ii = 0; ii < SIGNAL_SIZE; ii++) {
h_convolved_signal[ii].x = h_convolved_signal_half[ii].x;
h_convolved_signal[ii].y = h_convolved_signal_half[ii].y;
}
// Check if kernel execution generated and error
getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
// Transform signal back
printf("Transforming signal back cufftExecC2C\n");
checkCudaErrors(cufftXtExec(plan, r_signal, d_signal, CUFFT_INVERSE));
// Copy device memory to host
checkCudaErrors(cudaMemcpy(h_convolved_signal_half, d_signal, mem_size_half,
cudaMemcpyDeviceToHost));
// Destroy CUFFT context
checkCudaErrors(cufftDestroy(plan));
// cleanup memory
checkCudaErrors(cudaFree(d_signal));
checkCudaErrors(cudaFree(r_signal));
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
/*
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) {
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s) {
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) {
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
// Complex pointwise multiplication
static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b,
int size, float scale) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads) {
a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
}
}
*/ |
2b2f089ea5c76017be5a251c56d343bda8cf630d.hip | // !!! This is a file automatically generated by hipify!!!
/** Instrucciones
*
* El juego comienza con una configuracion al azar entre celdas vivas y muertas.
*
* Para modificar los valores de la ejecucion simplemente hay que modificar los
* valores de las constantes declaradas mas abajo.
*
* N: Numero de filas que tendra la matriz que almacene el estado del juego.
* M: Numero de columnas que tendra la matriz que almacene el estado del juego.
*
* BLOCK_SIZE: cantidad de threads que tendra cada bloque.
* SRAND_VALUE: semilla que se ocupara para generar los numeros al azar.
* GOLIF: Indicador en caso de que se quiera verificar la cantidad de celdas
* vecinas vivas usando solo IF's.
* IMPRIMIR: Indicador en caso de que se necesite imprimir las matrices (esto
* afecta considerablemente el rendimiento de la solucion)
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <fstream>
/* Declaracin de constantes */
#define SRAND_VALUE 1998 // Semilla para generar numeros random
#define IMPRIMIR 0 // Imprimir o no las matrices de entrada y de salida
#define T_LIMIT 1 // Tiempo lmite de clculo
/* Declaracin de funciones */
__global__ void GOL(int dimFilas, int dimColumnas, int *grid, int *newGrid);
__global__ void ghostRows(int dimFilas, int dimColumnas, int *grid);
__global__ void ghostCols(int dimFilas, int dimColumnas, int *grid);
__global__ void GOL_IF(int dimFilas, int dimColumnas, int *grid, int *newGrid);
void imprimir(int *matriz, int n, int m);
/* Mtodo principal */
int main(int argc, char *argv[]) {
// Carga NxM desde un archivo
std::ifstream infile;
infile.open("NxM.txt");
int x;
int N = 0;
int M = 0;
int jfile = 0;
while (infile >> x) {
if (jfile == 0) { N = x; }
else { M = x; }
jfile = 1;
}
infile.close();
// Carga IF, ejecutar el juego preguntando con IF (0:Falso 1:Verdadero)
infile.open("IF.txt");
int GOLIF = 0;
while (infile >> x) {
GOLIF = x;
}
infile.close();
// Carga el tamao de bloque
infile.open("BLOCK_SIZE.txt");
int BLOCK_SIZE = 0;
while (infile >> x) {
BLOCK_SIZE = x;
}
infile.close();
printf("Cargando matriz %dx%d\n", N, M);
printf("BLOCK SIZE: %d\n", BLOCK_SIZE);
if (GOLIF) {
printf("IF activado\n\n");
}
else {
printf("IF desactivado\n\n");
}
int i, j;
int *h_grid; // Matriz en CPU
int *d_grid; // Matriz en GPU
int *d_newGrid; // Matriz auxiliar usada solo en GPU
int *d_tmpGrid; // Puntero auxiliar para cambiar las matrices
signed t0, t1; // Variables para medir tiempo
double time = 0; //variables para medir tiempo
double Noperaciones = 0; // Variable para medir cantidad de operaciones ejecutadas
int dimFilas = N; // Dimensiones del juego de la vida (Filas), sin contar las filas fantasmas
int dimColumnas = M; // Dimensiones del juego de la vida (Columnas), sin contar las columnas fantasmas
size_t bytes = sizeof(int) * (dimFilas + 2) *
(dimColumnas + 2);// Se annade mas espacio para dejar filas y columnas fantasmas
// Solicitamos memoria para la matriz en la CPU
h_grid = (int *)malloc(bytes);
// Solicitamos memoria para las matrices en la GPU
hipMalloc(&d_grid, bytes);
hipMalloc(&d_newGrid, bytes);
// Colocamos valores aleatorios en la matriz inicialmente
srand(SRAND_VALUE);
for (i = 1; i <= dimFilas; i++) {
for (j = 1; j <= dimColumnas; j++) {
h_grid[i * (dimColumnas + 2) + j] = rand() % 2;
}
}
// Copiamos valores iniciales de la matriz a la GPU
hipMemcpy(d_grid, h_grid, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_newGrid, h_grid, bytes, hipMemcpyHostToDevice);
// Establecemos los tamannos de los bloques y la cantidad de bloques a utilizar
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
int linGrid = (int)ceil((dimFilas * dimColumnas) / (float)(BLOCK_SIZE * BLOCK_SIZE));
dim3 gridSize(linGrid, linGrid, 1);
dim3 cpyBlockSize(BLOCK_SIZE, 1, 1);
dim3 cpyGridRowsGridSize((int)ceil(dimFilas / (float)cpyBlockSize.x), 1, 1);
dim3 cpyGridColsGridSize((int)ceil((dimColumnas + 2) / (float)cpyBlockSize.x), 1, 1);
// Imprimimos de ser el caso
if (IMPRIMIR) {
imprimir(h_grid, N, M);
}
// Ciclo principal de ejecucin
t0 = static_cast<int>(clock());
while (time < T_LIMIT) {
hipLaunchKernelGGL(( ghostRows) , dim3(cpyGridRowsGridSize), dim3(cpyBlockSize) , 0, 0, dimFilas, dimColumnas, d_grid);
hipLaunchKernelGGL(( ghostCols) , dim3(cpyGridColsGridSize), dim3(cpyBlockSize) , 0, 0, dimFilas, dimColumnas, d_grid);
if (GOLIF) {
hipLaunchKernelGGL(( GOL_IF) , dim3(gridSize), dim3(blockSize) , 0, 0, dimFilas, dimColumnas, d_grid, d_newGrid);
}
else {
hipLaunchKernelGGL(( GOL) , dim3(gridSize), dim3(blockSize) , 0, 0, dimFilas, dimColumnas, d_grid, d_newGrid);
}
// Intercambiamos punteros
d_tmpGrid = d_grid;
d_grid = d_newGrid;
d_newGrid = d_tmpGrid;
Noperaciones += N * M;
t1 = static_cast<int>(clock());
time = (double(t1 - t0) / CLOCKS_PER_SEC);
} // Fin del ciclo principal de ejecucin
// Pedimos los resultados de vuelta
hipMemcpy(h_grid, d_grid, bytes, hipMemcpyDeviceToHost);
// Imprimimos de ser el caso
if (IMPRIMIR) {
printf("\n");
imprimir(h_grid, N, M);
}
// Imprimimos datos pedidos
printf("Tiempo total: %f\n", time);
printf("Numero de operaciones efectuadas: %.0f\n", Noperaciones);
// Se borra memoria
hipFree(d_grid);
hipFree(d_newGrid);
free(h_grid);
// Retorna main()
return 0;
}
__global__ void GOL(int dimFilas, int dimColumnas, int *grid, int *newGrid) {
// Queremos id en [1,dim]
int iy = blockDim.y * blockIdx.y + threadIdx.y + 1;
int ix = blockDim.x * blockIdx.x + threadIdx.x + 1;
int id = iy * (dimColumnas + 2) + ix;
int numNeighbors;
if (iy <= dimFilas && ix <= dimColumnas) {
// Obtenemos la cantidad de vecinos vivos
numNeighbors = grid[id + (dimColumnas + 2)] + grid[id - (dimColumnas + 2)] // upper lower
+ grid[id + 1] + grid[id - 1] // right left
+ grid[id + (dimColumnas + 3)] + grid[id - (dimColumnas + 3)] // diagonals
+ grid[id - (dimColumnas + 1)] + grid[id + (dimColumnas + 1)];
int cell = grid[id];
// Ponemos las reglas del juego
if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) {
newGrid[id] = 1;
}
else if (cell == 0 && numNeighbors == 3) {
newGrid[id] = 1;
}
else {
newGrid[id] = cell;
}
}
}
__global__ void GOL_IF(int dimFilas, int dimColumnas, int *grid, int *newGrid) {
// Queremos id en [1, dim]
int iy = blockDim.y * blockIdx.y + threadIdx.y + 1;
int ix = blockDim.x * blockIdx.x + threadIdx.x + 1;
int id = iy * (dimColumnas + 2) + ix;
int numNeighbors = 0;
if (iy <= dimFilas && ix <= dimColumnas) {
// Obtenemos la cantidad de vecinos vivos
if (grid[id + (dimColumnas + 2)]) { numNeighbors++; }
if (grid[id - (dimColumnas + 2)]) { numNeighbors++; }
if (grid[id + 1]) { numNeighbors++; }
if (grid[id - 1]) { numNeighbors++; }
if (grid[id + (dimColumnas + 3)]) { numNeighbors++; }
if (grid[id - (dimColumnas + 3)]) { numNeighbors++; }
if (grid[id - (dimColumnas + 1)]) { numNeighbors++; }
if (grid[id + (dimColumnas + 1)]) { numNeighbors++; }
int cell = grid[id];
// Ponemos las reglas del juego
if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) {
newGrid[id] = 1;
}
else if (cell == 0 && numNeighbors == 3) {
newGrid[id] = 1;
}
else {
newGrid[id] = cell;
}
}
}
__global__ void ghostRows(int dimFilas, int dimColumnas, int *grid) {
// Queremos id en [1, dim]
int id = blockDim.x * blockIdx.x + threadIdx.x + 1;
if (id <= dimColumnas) {
// Copiamos la primera fila real a la ltima fila
grid[(dimColumnas + 2) * (dimFilas + 1) + id] = grid[(dimColumnas + 2) + id];
// Copiamos la ltima fila real a la primera fila
grid[id] = grid[(dimColumnas + 2) * dimFilas + id];
}
}
__global__ void ghostCols(int dimFilas, int dimColumnas, int *grid) {
// Queremos id en [0, dim+1]
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id <= dimFilas + 1) {
// Copia la primera columna real a la ultima
grid[id * (dimColumnas + 2) + dimFilas + 1] = grid[id * (dimColumnas + 2) + 1];
// Copia la ltima columna real a la primera
grid[id * (dimColumnas + 2)] = grid[id * (dimColumnas + 2) + dimFilas];
}
}
void imprimir(int *matriz, int n, int m) {
for (int i = 1; i < n - 1; i++) {
for (int j = 1; j < m - 1; j++) {
printf("%d ", matriz[i * m + j]);
}
printf("\n");
}
} | 2b2f089ea5c76017be5a251c56d343bda8cf630d.cu | /** Instrucciones
*
* El juego comienza con una configuracion al azar entre celdas vivas y muertas.
*
* Para modificar los valores de la ejecucion simplemente hay que modificar los
* valores de las constantes declaradas mas abajo.
*
* N: Numero de filas que tendra la matriz que almacene el estado del juego.
* M: Numero de columnas que tendra la matriz que almacene el estado del juego.
*
* BLOCK_SIZE: cantidad de threads que tendra cada bloque.
* SRAND_VALUE: semilla que se ocupara para generar los numeros al azar.
* GOLIF: Indicador en caso de que se quiera verificar la cantidad de celdas
* vecinas vivas usando solo IF's.
* IMPRIMIR: Indicador en caso de que se necesite imprimir las matrices (esto
* afecta considerablemente el rendimiento de la solucion)
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <fstream>
/* Declaración de constantes */
#define SRAND_VALUE 1998 // Semilla para generar numeros random
#define IMPRIMIR 0 // Imprimir o no las matrices de entrada y de salida
#define T_LIMIT 1 // Tiempo límite de cálculo
/* Declaración de funciones */
__global__ void GOL(int dimFilas, int dimColumnas, int *grid, int *newGrid);
__global__ void ghostRows(int dimFilas, int dimColumnas, int *grid);
__global__ void ghostCols(int dimFilas, int dimColumnas, int *grid);
__global__ void GOL_IF(int dimFilas, int dimColumnas, int *grid, int *newGrid);
void imprimir(int *matriz, int n, int m);
/* Método principal */
int main(int argc, char *argv[]) {
// Carga NxM desde un archivo
std::ifstream infile;
infile.open("NxM.txt");
int x;
int N = 0;
int M = 0;
int jfile = 0;
while (infile >> x) {
if (jfile == 0) { N = x; }
else { M = x; }
jfile = 1;
}
infile.close();
// Carga IF, ejecutar el juego preguntando con IF (0:Falso 1:Verdadero)
infile.open("IF.txt");
int GOLIF = 0;
while (infile >> x) {
GOLIF = x;
}
infile.close();
// Carga el tamaño de bloque
infile.open("BLOCK_SIZE.txt");
int BLOCK_SIZE = 0;
while (infile >> x) {
BLOCK_SIZE = x;
}
infile.close();
printf("Cargando matriz %dx%d\n", N, M);
printf("BLOCK SIZE: %d\n", BLOCK_SIZE);
if (GOLIF) {
printf("IF activado\n\n");
}
else {
printf("IF desactivado\n\n");
}
int i, j;
int *h_grid; // Matriz en CPU
int *d_grid; // Matriz en GPU
int *d_newGrid; // Matriz auxiliar usada solo en GPU
int *d_tmpGrid; // Puntero auxiliar para cambiar las matrices
signed t0, t1; // Variables para medir tiempo
double time = 0; //variables para medir tiempo
double Noperaciones = 0; // Variable para medir cantidad de operaciones ejecutadas
int dimFilas = N; // Dimensiones del juego de la vida (Filas), sin contar las filas fantasmas
int dimColumnas = M; // Dimensiones del juego de la vida (Columnas), sin contar las columnas fantasmas
size_t bytes = sizeof(int) * (dimFilas + 2) *
(dimColumnas + 2);// Se annade mas espacio para dejar filas y columnas fantasmas
// Solicitamos memoria para la matriz en la CPU
h_grid = (int *)malloc(bytes);
// Solicitamos memoria para las matrices en la GPU
cudaMalloc(&d_grid, bytes);
cudaMalloc(&d_newGrid, bytes);
// Colocamos valores aleatorios en la matriz inicialmente
srand(SRAND_VALUE);
for (i = 1; i <= dimFilas; i++) {
for (j = 1; j <= dimColumnas; j++) {
h_grid[i * (dimColumnas + 2) + j] = rand() % 2;
}
}
// Copiamos valores iniciales de la matriz a la GPU
cudaMemcpy(d_grid, h_grid, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_newGrid, h_grid, bytes, cudaMemcpyHostToDevice);
// Establecemos los tamannos de los bloques y la cantidad de bloques a utilizar
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
int linGrid = (int)ceil((dimFilas * dimColumnas) / (float)(BLOCK_SIZE * BLOCK_SIZE));
dim3 gridSize(linGrid, linGrid, 1);
dim3 cpyBlockSize(BLOCK_SIZE, 1, 1);
dim3 cpyGridRowsGridSize((int)ceil(dimFilas / (float)cpyBlockSize.x), 1, 1);
dim3 cpyGridColsGridSize((int)ceil((dimColumnas + 2) / (float)cpyBlockSize.x), 1, 1);
// Imprimimos de ser el caso
if (IMPRIMIR) {
imprimir(h_grid, N, M);
}
// Ciclo principal de ejecución
t0 = static_cast<int>(clock());
while (time < T_LIMIT) {
ghostRows <<< cpyGridRowsGridSize, cpyBlockSize >>> (dimFilas, dimColumnas, d_grid);
ghostCols <<< cpyGridColsGridSize, cpyBlockSize >>> (dimFilas, dimColumnas, d_grid);
if (GOLIF) {
GOL_IF <<< gridSize, blockSize >>> (dimFilas, dimColumnas, d_grid, d_newGrid);
}
else {
GOL <<< gridSize, blockSize >>> (dimFilas, dimColumnas, d_grid, d_newGrid);
}
// Intercambiamos punteros
d_tmpGrid = d_grid;
d_grid = d_newGrid;
d_newGrid = d_tmpGrid;
Noperaciones += N * M;
t1 = static_cast<int>(clock());
time = (double(t1 - t0) / CLOCKS_PER_SEC);
} // Fin del ciclo principal de ejecución
// Pedimos los resultados de vuelta
cudaMemcpy(h_grid, d_grid, bytes, cudaMemcpyDeviceToHost);
// Imprimimos de ser el caso
if (IMPRIMIR) {
printf("\n");
imprimir(h_grid, N, M);
}
// Imprimimos datos pedidos
printf("Tiempo total: %f\n", time);
printf("Numero de operaciones efectuadas: %.0f\n", Noperaciones);
// Se borra memoria
cudaFree(d_grid);
cudaFree(d_newGrid);
free(h_grid);
// Retorna main()
return 0;
}
__global__ void GOL(int dimFilas, int dimColumnas, int *grid, int *newGrid) {
// Queremos id en [1,dim]
int iy = blockDim.y * blockIdx.y + threadIdx.y + 1;
int ix = blockDim.x * blockIdx.x + threadIdx.x + 1;
int id = iy * (dimColumnas + 2) + ix;
int numNeighbors;
if (iy <= dimFilas && ix <= dimColumnas) {
// Obtenemos la cantidad de vecinos vivos
numNeighbors = grid[id + (dimColumnas + 2)] + grid[id - (dimColumnas + 2)] // upper lower
+ grid[id + 1] + grid[id - 1] // right left
+ grid[id + (dimColumnas + 3)] + grid[id - (dimColumnas + 3)] // diagonals
+ grid[id - (dimColumnas + 1)] + grid[id + (dimColumnas + 1)];
int cell = grid[id];
// Ponemos las reglas del juego
if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) {
newGrid[id] = 1;
}
else if (cell == 0 && numNeighbors == 3) {
newGrid[id] = 1;
}
else {
newGrid[id] = cell;
}
}
}
__global__ void GOL_IF(int dimFilas, int dimColumnas, int *grid, int *newGrid) {
// Queremos id en [1, dim]
int iy = blockDim.y * blockIdx.y + threadIdx.y + 1;
int ix = blockDim.x * blockIdx.x + threadIdx.x + 1;
int id = iy * (dimColumnas + 2) + ix;
int numNeighbors = 0;
if (iy <= dimFilas && ix <= dimColumnas) {
// Obtenemos la cantidad de vecinos vivos
if (grid[id + (dimColumnas + 2)]) { numNeighbors++; }
if (grid[id - (dimColumnas + 2)]) { numNeighbors++; }
if (grid[id + 1]) { numNeighbors++; }
if (grid[id - 1]) { numNeighbors++; }
if (grid[id + (dimColumnas + 3)]) { numNeighbors++; }
if (grid[id - (dimColumnas + 3)]) { numNeighbors++; }
if (grid[id - (dimColumnas + 1)]) { numNeighbors++; }
if (grid[id + (dimColumnas + 1)]) { numNeighbors++; }
int cell = grid[id];
// Ponemos las reglas del juego
if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) {
newGrid[id] = 1;
}
else if (cell == 0 && numNeighbors == 3) {
newGrid[id] = 1;
}
else {
newGrid[id] = cell;
}
}
}
__global__ void ghostRows(int dimFilas, int dimColumnas, int *grid) {
// Queremos id en [1, dim]
int id = blockDim.x * blockIdx.x + threadIdx.x + 1;
if (id <= dimColumnas) {
// Copiamos la primera fila real a la última fila
grid[(dimColumnas + 2) * (dimFilas + 1) + id] = grid[(dimColumnas + 2) + id];
// Copiamos la última fila real a la primera fila
grid[id] = grid[(dimColumnas + 2) * dimFilas + id];
}
}
__global__ void ghostCols(int dimFilas, int dimColumnas, int *grid) {
// Queremos id en [0, dim+1]
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id <= dimFilas + 1) {
// Copia la primera columna real a la ultima
grid[id * (dimColumnas + 2) + dimFilas + 1] = grid[id * (dimColumnas + 2) + 1];
// Copia la última columna real a la primera
grid[id * (dimColumnas + 2)] = grid[id * (dimColumnas + 2) + dimFilas];
}
}
void imprimir(int *matriz, int n, int m) {
for (int i = 1; i < n - 1; i++) {
for (int j = 1; j < m - 1; j++) {
printf("%d ", matriz[i * m + j]);
}
printf("\n");
}
} |
65052e14e6d4c889b5f1297b61eebea83abdd345.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::SaveCudaContext{
[&]() {
dh::safe_cuda(hipSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
};
}
namespace {
class HistogramCutsWrapper : public common::HistogramCuts {
public:
using SuperT = common::HistogramCuts;
void SetValues(std::vector<float> cuts) {
SuperT::cut_values_ = cuts;
}
void SetPtrs(std::vector<uint32_t> ptrs) {
SuperT::cut_ptrs_ = ptrs;
}
void SetMins(std::vector<float> mins) {
SuperT::min_vals_ = mins;
}
};
} // anonymous namespace
template <typename GradientSumT>
void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetBatches<xgboost::SparsePage>().begin();
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
// 24 cut fields, 3 cut fields for each feature (column).
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
auto is_dense = (*dmat)->Info().num_nonzero_ ==
(*dmat)->Info().num_row_ * (*dmat)->Info().num_col_;
size_t row_stride = 0;
const auto &offset_vec = batch.offset.ConstHostVector();
for (size_t i = 1; i < offset_vec.size(); ++i) {
row_stride = ::max(row_stride, offset_vec[i] - offset_vec[i-1]);
}
shard->InitCompressedData(cmat, row_stride, is_dense);
shard->CreateHistIndices(
batch, cmat, RowStateOnDevice(batch.Size(), batch.Size()), -1);
delete dmat;
}
TEST(GpuHist, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
tree::TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols, kNCols);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.ellpack_matrix.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.ellpack_matrix.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * shard.ellpack_matrix.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientSumT> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.data();
dh::safe_cuda(hipMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.size(),
hipMemcpyDeviceToHost));
shard.row_partitioner.reset(new RowPartitioner(0, kNRows));
shard.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(shard.gpair, h_gpair);
shard.use_shared_memory_histograms = use_shared_memory_histograms;
shard.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = shard.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), data_size,
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}
};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard{
new DeviceShard<GradientPairPrecise>(0, 0, 0, kNRows, param, kNCols,
kNCols)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
shard->ba.Allocate(0,
&(shard->feature_segments), cmat.Ptrs().size(),
&(shard->min_fvalue), cmat.MinValues().size(),
&(shard->gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.Ptrs());
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.Values());
dh::CopyVectorToDeviceSpan(shard->monotone_constraints,
param.monotone_constraints);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.MinValues());
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.Data().begin());
shard->column_sampler.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
shard->node_value_constraints.resize(1);
shard->node_value_constraints[0].lower_bound = -1.0;
shard->node_value_constraints[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
shard->EvaluateSplits({ 0,0 }, tree, kNCols);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(training_params, &generic_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Configure(training_params, &generic_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
ASSERT_EQ(hist_maker.shards_.size(), hist_maker_ext.shards_.size());
// Extract the device shards from the histogram makers and from that its compressed
// histogram index
for (size_t i = 0; i < hist_maker.shards_.size(); ++i) {
const auto &dev_shard = hist_maker.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer(dev_shard->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, dev_shard->gidx_buffer);
const auto &dev_shard_ext = hist_maker_ext.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer_ext(dev_shard_ext->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer_ext, dev_shard_ext->gidx_buffer);
ASSERT_EQ(dev_shard->n_bins, dev_shard_ext->n_bins);
ASSERT_EQ(dev_shard->gidx_buffer.size(), dev_shard_ext->gidx_buffer.size());
ASSERT_EQ(h_gidx_buffer, h_gidx_buffer_ext);
}
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
} // namespace tree
} // namespace xgboost
| 65052e14e6d4c889b5f1297b61eebea83abdd345.cu | /*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::SaveCudaContext{
[&]() {
dh::safe_cuda(cudaSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
};
}
namespace {
class HistogramCutsWrapper : public common::HistogramCuts {
public:
using SuperT = common::HistogramCuts;
void SetValues(std::vector<float> cuts) {
SuperT::cut_values_ = cuts;
}
void SetPtrs(std::vector<uint32_t> ptrs) {
SuperT::cut_ptrs_ = ptrs;
}
void SetMins(std::vector<float> mins) {
SuperT::min_vals_ = mins;
}
};
} // anonymous namespace
template <typename GradientSumT>
void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetBatches<xgboost::SparsePage>().begin();
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
// 24 cut fields, 3 cut fields for each feature (column).
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
auto is_dense = (*dmat)->Info().num_nonzero_ ==
(*dmat)->Info().num_row_ * (*dmat)->Info().num_col_;
size_t row_stride = 0;
const auto &offset_vec = batch.offset.ConstHostVector();
for (size_t i = 1; i < offset_vec.size(); ++i) {
row_stride = std::max(row_stride, offset_vec[i] - offset_vec[i-1]);
}
shard->InitCompressedData(cmat, row_stride, is_dense);
shard->CreateHistIndices(
batch, cmat, RowStateOnDevice(batch.Size(), batch.Size()), -1);
delete dmat;
}
TEST(GpuHist, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
tree::TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols, kNCols);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.ellpack_matrix.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientPairPrecise> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(shard.gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, shard.gidx_buffer);
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.ellpack_matrix.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * shard.ellpack_matrix.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
DeviceShard<GradientSumT> shard(0, 0, 0, kNRows, param, kNCols,
kNCols);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.data();
dh::safe_cuda(cudaMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.size(),
cudaMemcpyDeviceToHost));
shard.row_partitioner.reset(new RowPartitioner(0, kNRows));
shard.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(shard.gpair, h_gpair);
shard.use_shared_memory_histograms = use_shared_memory_histograms;
shard.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = shard.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}
};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard{
new DeviceShard<GradientPairPrecise>(0, 0, 0, kNRows, param, kNCols,
kNCols)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
shard->ba.Allocate(0,
&(shard->feature_segments), cmat.Ptrs().size(),
&(shard->min_fvalue), cmat.MinValues().size(),
&(shard->gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(shard->feature_segments, cmat.Ptrs());
dh::CopyVectorToDeviceSpan(shard->gidx_fvalue_map, cmat.Values());
dh::CopyVectorToDeviceSpan(shard->monotone_constraints,
param.monotone_constraints);
shard->ellpack_matrix.feature_segments = shard->feature_segments;
shard->ellpack_matrix.gidx_fvalue_map = shard->gidx_fvalue_map;
dh::CopyVectorToDeviceSpan(shard->min_fvalue, cmat.MinValues());
shard->ellpack_matrix.min_fvalue = shard->min_fvalue;
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.Data().begin());
shard->column_sampler.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
shard->node_value_constraints.resize(1);
shard->node_value_constraints[0].lower_bound = -1.0;
shard->node_value_constraints[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
shard->EvaluateSplits({ 0,0 }, tree, kNCols);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(training_params, &generic_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Configure(training_params, &generic_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
ASSERT_EQ(hist_maker.shards_.size(), hist_maker_ext.shards_.size());
// Extract the device shards from the histogram makers and from that its compressed
// histogram index
for (size_t i = 0; i < hist_maker.shards_.size(); ++i) {
const auto &dev_shard = hist_maker.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer(dev_shard->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, dev_shard->gidx_buffer);
const auto &dev_shard_ext = hist_maker_ext.shards_[i];
std::vector<common::CompressedByteT> h_gidx_buffer_ext(dev_shard_ext->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer_ext, dev_shard_ext->gidx_buffer);
ASSERT_EQ(dev_shard->n_bins, dev_shard_ext->n_bins);
ASSERT_EQ(dev_shard->gidx_buffer.size(), dev_shard_ext->gidx_buffer.size());
ASSERT_EQ(h_gidx_buffer, h_gidx_buffer_ext);
}
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
} // namespace tree
} // namespace xgboost
|
8ebec6f0b22420282e3b45d0666da3bda03db261.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 1
#include <wb.h>
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < len) out[i] = in1[i] + in2[i];
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1;
float * deviceInput2;
float * deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc(&deviceInput1, inputLength * sizeof(float));
hipMalloc(&deviceInput2, inputLength * sizeof(float));
hipMalloc(&deviceOutput, inputLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 grid ((inputLength - 1)/256 + 1, 1, 1);
dim3 block (256, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( vecAdd), dim3(grid),dim3(block), 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(&deviceInput1);
hipFree(&deviceInput2);
hipFree(&deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
| 8ebec6f0b22420282e3b45d0666da3bda03db261.cu | // MP 1
#include <wb.h>
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i in [0, len).
// Launch with any 1-D grid providing at least `len` threads in total.
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
    // Flat global index: one thread handles one element.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard: the final block may extend past the end of the arrays.
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
// Host driver: import two vectors via the wb harness, add them on the GPU,
// and hand the result to wbSolution for checking.
int main(int argc, char ** argv) {
    wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;
    args = wbArg_read(argc, argv);
    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    wbTime_stop(Generic, "Importing data and creating memory on host");
    wbLog(TRACE, "The input length is ", inputLength);
    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    cudaMalloc(&deviceInput1, inputLength * sizeof(float));
    cudaMalloc(&deviceInput2, inputLength * sizeof(float));
    cudaMalloc(&deviceOutput, inputLength * sizeof(float));
    wbTime_stop(GPU, "Allocating GPU memory.");
    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    cudaMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float), cudaMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");
    //@@ Initialize the grid and block dimensions here
    // Ceil-divide so a non-multiple-of-256 length still gets full coverage.
    dim3 grid ((inputLength - 1)/256 + 1, 1, 1);
    dim3 block (256, 1, 1);
    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    vecAdd<<<grid,block>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and also surfaces asynchronous kernel errors here.
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");
    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    cudaMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float), cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");
    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    // BUG FIX: cudaFree() takes the device pointer itself, not its address.
    // The previous cudaFree(&deviceInput1) passed a host stack address,
    // failing the free and leaking the device allocations.
    cudaFree(deviceInput1);
    cudaFree(deviceInput2);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");
    wbSolution(args, hostOutput, inputLength);
    free(hostInput1);
    free(hostInput2);
    free(hostOutput);
    return 0;
}
|
5bb04e22b9f37133b674e886bd1b6c8eb7f1dfc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------------------
*
* Module : Scan
* Copyright : (c) 2009 Trevor L. McDonell
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include "scan.h"
#include "algorithms.h"
#include "utils.h"
#include "operator.h"
#include "cudpp/cudpp_globals.h"
#include "cudpp/segmented_scan_kernel.cu"
#include "cudpp/vector_kernel.cu"
/*
 * Per-invocation bookkeeping for the hierarchical segmented scan: one entry
 * per recursion level, each holding one value per thread block of the scan
 * at that level (see segscan_init / segscan_recursive).
 */
template <typename T>
struct segscan_plan
{
    T **sums;                   // per-level device arrays of block partial sums
    unsigned int **flags;       // per-level device arrays of block segment flags
    unsigned int **indices;     // per-level device arrays consumed by the uniform-add fix-up
    unsigned int num_levels;    // number of levels that required extra storage
};
/*
 * Number of thread blocks needed to scan N elements, where each block
 * processes SEGSCAN_ELTS_PER_THREAD * CTA_SIZE elements; never less than one.
 */
static inline unsigned int
calc_num_blocks(unsigned int N)
{
    const double per_block = (double) (SEGSCAN_ELTS_PER_THREAD * CTA_SIZE);
    unsigned int blocks = (unsigned int) ceil((double) N / per_block);
    return (blocks < 1u) ? 1u : blocks;
}
/*
 * Scans of large arrays must be split (possibly recursively) into a hierarchy
 * of block scans, where each block is processed by a single thread block. On
 * returning from a recursive call, the total sum of each block from the level
 * below is added to all elements of the first segment of the corresponding
 * block. This is the CPU-side workhorse that achieves this.
 *
 * Template parameters fix the combining operator, element type, scan
 * direction and inclusivity at compile time; `plan' supplies the per-level
 * device scratch arrays allocated by segscan_init().
 */
template <class op, typename T, bool backward, bool exclusive, bool shift_flags>
static void
segscan_recursive
(
    const T             *d_in,
    const unsigned int  *d_flags,
    T                   *d_out,
    segscan_plan<T>     *plan,
    const unsigned int  N,
    const unsigned int  level
)
{
    unsigned int num_blocks = calc_num_blocks(N);
    unsigned int per_block  = CTA_SIZE * 2;
    bool is_full = N == num_blocks * SEGSCAN_ELTS_PER_THREAD * CTA_SIZE;

    /*
     * Space to store flags in the shared memory. Two sets are required, one
     * gets modified and the other does not.
     */
    unsigned int flag_space = per_block * sizeof(unsigned int);
    unsigned int idx_space  = per_block * sizeof(unsigned int);

    dim3 grid(num_blocks, 1, 1);
    dim3 threads(CTA_SIZE, 1, 1);
    unsigned int smem = sizeof(T) * per_block + flag_space + idx_space;

    /*
     * Check the hardware
     */
    int dev;
    hipDeviceProp_t props;
    hipGetDevice(&dev);
    hipGetDeviceProperties(&props, dev);

    /*
     * Set up execution parameters, and execute the scan.  Each trait bit
     * selects a compile-time specialisation of the kernel template below.
     */
#define MULTIBLOCK  0x01
#define FULLBLOCK   0x02
#define SM12_HW     0x04
#define BACKWARD    0x08

    int traits = 0;
    if (num_blocks > 1) traits |= MULTIBLOCK;
    if (is_full)        traits |= FULLBLOCK;
    /*
     * NOTE(review): only the minor revision is inspected here, so devices
     * with major >= 2 but minor < 2 never take the SM12_HW path.  The intent
     * is presumably "compute capability >= 1.2" -- verify before changing.
     */
    if (props.minor >= 2) traits |= SM12_HW;

    switch (traits)
    {
    case 0:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, false, false> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N);
        break;

    case MULTIBLOCK:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, true, false> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    case FULLBLOCK:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, false, false> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N);
        break;

    case SM12_HW:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, false, true> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N);
        break;

    case MULTIBLOCK | FULLBLOCK:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, true, false> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    case MULTIBLOCK | SM12_HW:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, true, true> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    case FULLBLOCK | SM12_HW:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, false, true> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N);
        break;

    case MULTIBLOCK | FULLBLOCK | SM12_HW:
        hipLaunchKernelGGL(( segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, true, true> >)
            , dim3(grid), dim3(threads), smem, 0, d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    default:
        assert(!"Non-exhaustive patterns in match");
    }

    /*
     * After scanning the sub-blocks, take all of the last values and
     * segment-scan those. This will give the new value which must be added to
     * the first segment of each block to get the final result.
     */
    if (num_blocks > 1)
    {
        T            *sums    = plan->sums[level];
        unsigned int *indices = plan->indices[level];

        /* Recurse one level up (inclusive, unshifted) over the per-block totals. */
        segscan_recursive
            <op, T, backward, false, false>
            (sums, plan->flags[level], sums, plan, num_blocks, level + 1);

        traits = 0;
        if (is_full)  traits |= FULLBLOCK;
        if (backward) traits |= BACKWARD;

        switch (traits)
        {
        case 0:
            hipLaunchKernelGGL(( vectorSegmentedAddUniform4<T, op, false>), dim3(grid), dim3(threads), 0, 0, d_out, sums, indices, N, 0, 0);
            break;

        case FULLBLOCK:
            hipLaunchKernelGGL(( vectorSegmentedAddUniform4<T, op, true>), dim3(grid), dim3(threads), 0, 0, d_out, sums, indices, N, 0, 0);
            break;

        case BACKWARD:
            hipLaunchKernelGGL(( vectorSegmentedAddUniformToRight4<T, op, false>), dim3(grid), dim3(threads), 0, 0, d_out, sums, indices, N, 0, 0);
            break;

        case FULLBLOCK | BACKWARD:
            hipLaunchKernelGGL(( vectorSegmentedAddUniformToRight4<T, op, true>), dim3(grid), dim3(threads), 0, 0, d_out, sums, indices, N, 0, 0);
            break;

        default:
            assert(!"Non-exhaustive patterns in match");
        }
    }

#undef MULTIBLOCK
#undef FULLBLOCK
#undef SM12_HW
#undef BACKWARD
}
/*
 * Allocate the temporary per-level device storage used by the segmented
 * scan.  Two passes: the first counts how many recursion levels need
 * block-sum storage (recorded in plan->num_levels), the second performs the
 * device allocations.  A level whose data fits in a single block allocates
 * nothing.
 */
template <typename T>
static void
segscan_init(int N, segscan_plan<T> *plan)
{
    /* Pass 1: count the levels that need intermediate block sums. */
    unsigned int depth = 0;
    unsigned int remaining = N;
    while (remaining > 1)
    {
        unsigned int blocks = calc_num_blocks(remaining);
        if (blocks > 1)
            ++depth;
        remaining = blocks;
    }
    plan->num_levels = depth;
    plan->sums    = (T**) malloc(depth * sizeof(T*));
    plan->flags   = (unsigned int**) malloc(depth * sizeof(unsigned int *));
    plan->indices = (unsigned int**) malloc(depth * sizeof(unsigned int *));

    /* Pass 2: allocate one entry per block at every multi-block level. */
    unsigned int lvl = 0;
    remaining = N;
    while (remaining > 1)
    {
        unsigned int blocks = calc_num_blocks(remaining);
        if (blocks > 1)
        {
            hipMalloc((void**) &plan->sums[lvl],    blocks * sizeof(T));
            hipMalloc((void**) &plan->flags[lvl],   blocks * sizeof(unsigned int));
            hipMalloc((void**) &plan->indices[lvl], blocks * sizeof(unsigned int));
        }
        remaining = blocks;
        ++lvl;
    }
}
/*
 * Release everything segscan_init() allocated: the per-level device arrays
 * first, then the host-side pointer tables themselves.
 */
template <typename T>
static void
segscan_finalise(segscan_plan<T> *p)
{
    unsigned int lvl = 0;
    while (lvl < p->num_levels)
    {
        hipFree(p->sums[lvl]);
        hipFree(p->flags[lvl]);
        hipFree(p->indices[lvl]);
        ++lvl;
    }
    free(p->sums);
    free(p->flags);
    free(p->indices);
}
/*
 * Perform a segmented scan operation on the input array of data, much like
 * `scan', but with an additional input array of non-zero `flags' that demarcate
 * the first element of a segment.
 *
 * Allocates the per-level scratch storage, runs the recursive scan, then
 * releases the storage.  `backward' selects a right-to-left scan; note that
 * the shift_flags template argument of segscan_recursive is deliberately
 * tied to `backward'.
 */
template <class op, typename T, bool backward, bool exclusive>
void
segmented_scan
(
    const T *d_in,
    const unsigned int *d_flags,
    T *d_out,
    const unsigned int length
)
{
    segscan_plan<T> plan;
    segscan_init<T>(length, &plan);
    segscan_recursive<op, T, backward, exclusive, backward>(d_in, d_flags, d_out, &plan, length, 0);
    segscan_finalise<T>(&plan);
}
// -----------------------------------------------------------------------------
// Instances
// -----------------------------------------------------------------------------
/* Backward (right-to-left), inclusive segmented sum over floats. */
void postsegscanr_plusf(const float *d_in, const unsigned int *d_flags, float *d_out, const unsigned int N)
{
    segmented_scan< Plus<float>, float, true, false >(d_in, d_flags, d_out, N);
}
| 5bb04e22b9f37133b674e886bd1b6c8eb7f1dfc8.cu | /* -----------------------------------------------------------------------------
*
* Module : Scan
* Copyright : (c) 2009 Trevor L. McDonell
* License : BSD
*
* ---------------------------------------------------------------------------*/
#include "scan.h"
#include "algorithms.h"
#include "utils.h"
#include "operator.h"
#include "cudpp/cudpp_globals.h"
#include "cudpp/segmented_scan_kernel.cu"
#include "cudpp/vector_kernel.cu"
/*
 * Per-invocation bookkeeping for the hierarchical segmented scan: one entry
 * per recursion level, each holding one value per thread block of the scan
 * at that level (see segscan_init / segscan_recursive).
 */
template <typename T>
struct segscan_plan
{
    T **sums;                   // per-level device arrays of block partial sums
    unsigned int **flags;       // per-level device arrays of block segment flags
    unsigned int **indices;     // per-level device arrays consumed by the uniform-add fix-up
    unsigned int num_levels;    // number of levels that required extra storage
};
/* Number of thread blocks needed to scan N elements (each block covers
 * SEGSCAN_ELTS_PER_THREAD * CTA_SIZE elements); never less than one. */
static inline unsigned int
calc_num_blocks(unsigned int N)
{
    return max(1u, (unsigned int)ceil((double)N / (SEGSCAN_ELTS_PER_THREAD * CTA_SIZE)));
}
/*
 * Scans of large arrays must be split (possibly recursively) into a hierarchy
 * of block scans, where each block is processed by a single thread block. On
 * returning from a recursive call, the total sum of each block from the level
 * below is added to all elements of the first segment of the corresponding
 * block. This is the CPU-side workhorse that achieves this.
 *
 * Template parameters fix the combining operator, element type, scan
 * direction and inclusivity at compile time; `plan' supplies the per-level
 * device scratch arrays allocated by segscan_init().
 */
template <class op, typename T, bool backward, bool exclusive, bool shift_flags>
static void
segscan_recursive
(
    const T             *d_in,
    const unsigned int  *d_flags,
    T                   *d_out,
    segscan_plan<T>     *plan,
    const unsigned int  N,
    const unsigned int  level
)
{
    unsigned int num_blocks = calc_num_blocks(N);
    unsigned int per_block  = CTA_SIZE * 2;
    bool is_full = N == num_blocks * SEGSCAN_ELTS_PER_THREAD * CTA_SIZE;

    /*
     * Space to store flags in the shared memory. Two sets are required, one
     * gets modified and the other does not.
     */
    unsigned int flag_space = per_block * sizeof(unsigned int);
    unsigned int idx_space  = per_block * sizeof(unsigned int);

    dim3 grid(num_blocks, 1, 1);
    dim3 threads(CTA_SIZE, 1, 1);
    unsigned int smem = sizeof(T) * per_block + flag_space + idx_space;

    /*
     * Check the hardware
     */
    int dev;
    cudaDeviceProp props;
    cudaGetDevice(&dev);
    cudaGetDeviceProperties(&props, dev);

    /*
     * Set up execution parameters, and execute the scan.  Each trait bit
     * selects a compile-time specialisation of the kernel template below.
     */
#define MULTIBLOCK  0x01
#define FULLBLOCK   0x02
#define SM12_HW     0x04
#define BACKWARD    0x08

    int traits = 0;
    if (num_blocks > 1) traits |= MULTIBLOCK;
    if (is_full)        traits |= FULLBLOCK;
    /*
     * NOTE(review): only the minor revision is inspected here, so devices
     * with major >= 2 but minor < 2 never take the SM12_HW path.  The intent
     * is presumably "compute capability >= 1.2" -- verify before changing.
     */
    if (props.minor >= 2) traits |= SM12_HW;

    switch (traits)
    {
    case 0:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, false, false> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N);
        break;

    case MULTIBLOCK:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, true, false> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    case FULLBLOCK:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, false, false> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N);
        break;

    case SM12_HW:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, false, true> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N);
        break;

    case MULTIBLOCK | FULLBLOCK:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, true, false> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    case MULTIBLOCK | SM12_HW:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, false, true, true> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    case FULLBLOCK | SM12_HW:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, false, true> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N);
        break;

    case MULTIBLOCK | FULLBLOCK | SM12_HW:
        segmentedScan4
            < T, SegmentedScanTraits<T, op, backward, exclusive, shift_flags, true, true, true> >
            <<<grid, threads, smem>>>(d_out, d_in, d_flags, N, plan->sums[level], plan->flags[level], plan->indices[level]);
        break;

    default:
        assert(!"Non-exhaustive patterns in match");
    }

    /*
     * After scanning the sub-blocks, take all of the last values and
     * segment-scan those. This will give the new value which must be added to
     * the first segment of each block to get the final result.
     */
    if (num_blocks > 1)
    {
        T            *sums    = plan->sums[level];
        unsigned int *indices = plan->indices[level];

        /* Recurse one level up (inclusive, unshifted) over the per-block totals. */
        segscan_recursive
            <op, T, backward, false, false>
            (sums, plan->flags[level], sums, plan, num_blocks, level + 1);

        traits = 0;
        if (is_full)  traits |= FULLBLOCK;
        if (backward) traits |= BACKWARD;

        switch (traits)
        {
        case 0:
            vectorSegmentedAddUniform4<T, op, false><<<grid, threads>>>(d_out, sums, indices, N, 0, 0);
            break;

        case FULLBLOCK:
            vectorSegmentedAddUniform4<T, op, true><<<grid, threads>>>(d_out, sums, indices, N, 0, 0);
            break;

        case BACKWARD:
            vectorSegmentedAddUniformToRight4<T, op, false><<<grid, threads>>>(d_out, sums, indices, N, 0, 0);
            break;

        case FULLBLOCK | BACKWARD:
            vectorSegmentedAddUniformToRight4<T, op, true><<<grid, threads>>>(d_out, sums, indices, N, 0, 0);
            break;

        default:
            assert(!"Non-exhaustive patterns in match");
        }
    }

#undef MULTIBLOCK
#undef FULLBLOCK
#undef SM12_HW
#undef BACKWARD
}
/*
 * Allocate temporary memory used by the segmented scan.
 *
 * Two passes over the level structure: the first counts how many recursion
 * levels need block-sum storage (plan->num_levels), the second performs the
 * per-level device allocations.  A level whose data fits in a single block
 * allocates nothing.
 */
template <typename T>
static void
segscan_init(int N, segscan_plan<T> *plan)
{
    unsigned int level    = 0;
    unsigned int elements = N;
    unsigned int num_blocks;

    /*
     * Determine how many intermediate block-level summations will be required
     */
    for (elements = N; elements > 1; elements = num_blocks)
    {
        num_blocks = calc_num_blocks(elements);
        if (num_blocks > 1)
            ++level;
    }
    plan->num_levels = level;
    plan->sums    = (T**) malloc(level * sizeof(T*));
    plan->flags   = (unsigned int**) malloc(level * sizeof(unsigned int *));
    plan->indices = (unsigned int**) malloc(level * sizeof(unsigned int *));

    /*
     * Now allocate the necessary storage at each level
     */
    for (elements = N, level = 0; elements > 1; elements = num_blocks, level++)
    {
        num_blocks = calc_num_blocks(elements);
        if (num_blocks > 1)
        {
            cudaMalloc((void**) &plan->sums[level], num_blocks * sizeof(T));
            cudaMalloc((void**) &plan->flags[level], num_blocks * sizeof(unsigned int));
            cudaMalloc((void**) &plan->indices[level], num_blocks * sizeof(unsigned int));
        }
    }
}
/*
 * Clean up temporary memory: free every per-level device array allocated by
 * segscan_init(), then the host-side pointer tables themselves.
 */
template <typename T>
static void
segscan_finalise(segscan_plan<T> *p)
{
    for (unsigned int l = 0; l < p->num_levels; ++l)
    {
        cudaFree(p->sums[l]);
        cudaFree(p->flags[l]);
        cudaFree(p->indices[l]);
    }
    free(p->sums);
    free(p->flags);
    free(p->indices);
}
/*
 * Perform a segmented scan operation on the input array of data, much like
 * `scan', but with an additional input array of non-zero `flags' that demarcate
 * the first element of a segment.
 *
 * Allocates the per-level scratch storage, runs the recursive scan, then
 * releases the storage.  `backward' selects a right-to-left scan; note that
 * the shift_flags template argument of segscan_recursive is deliberately
 * tied to `backward'.
 */
template <class op, typename T, bool backward, bool exclusive>
void
segmented_scan
(
    const T *d_in,
    const unsigned int *d_flags,
    T *d_out,
    const unsigned int length
)
{
    segscan_plan<T> plan;
    segscan_init<T>(length, &plan);
    segscan_recursive<op, T, backward, exclusive, backward>(d_in, d_flags, d_out, &plan, length, 0);
    segscan_finalise<T>(&plan);
}
// -----------------------------------------------------------------------------
// Instances
// -----------------------------------------------------------------------------
/* Backward (right-to-left), inclusive segmented sum over floats. */
void postsegscanr_plusf(const float *d_in, const unsigned int *d_flags, float *d_out, const unsigned int N)
{
    segmented_scan< Plus<float>, float, true, false >(d_in, d_flags, d_out, N);
}
|
672beb6ef6da232dabd5ab2114a06da5efd63437.hip | // !!! This is a file automatically generated by hipify!!!
/*
MULTIPLE-BLOCK VERSION
Assume that all sub-blocks in each half are located
You are given 2 functions to calculate the rank of a key: get_rank_inclusive and get_rank_exclusive
The input data is read from the stdin, it contains 5 lines:
+ Line 1: half_size, the number of elements in each half (half_size = n/2)
+ Line 2: input data (half_size*2 elements)
+ Line 3: sb_num, the number of sub-blocks in each half
+ Line 4: the array sb_len_left (the length of each sub-block in the left half)
+ Line 5: the array sb_len_right (the length of each sub-block in the right half)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
/**/
#define SAMPLE_INTERVAL 4 /* pick a sample every 4 elements */
/**/
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (including this key)
*/
static inline __device__ int get_rank_inclusive(int key, int* arr, int len);
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (excluding this key)
*/
static inline __device__ int get_rank_exclusive(int key, int* arr, int len);
/**/
/*
 * Merge kernel: thread block b merges left sub-block b with right sub-block
 * b, writing the merged run into the corresponding span of `output'.  Each
 * thread places one key by adding its local position to its rank in the
 * opposite sub-block.  Left keys use the exclusive rank and right keys the
 * inclusive rank, so equal keys from the left land before equal keys from
 * the right (a stable merge with no output collisions).
 *
 * NOTE(review): launched with SAMPLE_INTERVAL (= 4) threads per block, so a
 * sub-block longer than blockDim.x would only be partially merged -- verify
 * the locator stage bounds sub-blocks to SAMPLE_INTERVAL elements.
 */
__global__ void pairwise_merge(int* input, int half_size, int* sb_len_left, int* sb_len_right, int sb_num, int* output)
{
    int i, other_rank, output_rank;

    int* left_half = input;
    int* right_half = input + half_size;
    int* cur_output = output;

    /* Walk past the sub-blocks handled by lower-numbered blocks to find this
     * block's input sub-blocks and its output slot. */
    for(i=0;i<blockIdx.x;++i)
    {
        left_half += sb_len_left[i];
        right_half += sb_len_right[i];
        cur_output = cur_output + sb_len_left[i] + sb_len_right[i];
    }

    if(threadIdx.x < sb_len_left[blockIdx.x])
    {
        int key = left_half[threadIdx.x];
        /* use function get_rank_exclusive() to calculate the rank
           of key in the left_right*/
        other_rank = get_rank_exclusive(key,right_half,sb_len_right[blockIdx.x]);
        /* calculate the output rank of key */
        output_rank = threadIdx.x + other_rank;
        /* assign key to the correspoding position in the output array*/
        cur_output[output_rank] = key;
    }

    /********************************************/
    if(threadIdx.x < sb_len_right[blockIdx.x])
    {
        int key = right_half[threadIdx.x];
        /* use function get_rank_inclusive() to calculate the rank
           of key in the left_half*/
        other_rank = get_rank_inclusive(key,left_half,sb_len_left[blockIdx.x]);
        /* calculate the output rank of key */
        output_rank = threadIdx.x + other_rank;
        /* assign key to the correspoding position in the output array*/
        cur_output[output_rank] = key;
    }
}/* end of the kernel*/
/**/
void checkCUDAError(const char *msg);
/**/
/*
 * Host driver: reads the problem description from stdin (five lines, see the
 * file header), runs the pairwise merge kernel with one thread block per
 * sub-block pair, and prints the merged array.  GPU elapsed time (copies +
 * kernel + copy-back) is reported on stderr.
 */
int main(int argc, char* argv[])
{
    int i;

    int* h_input, *h_output;

    /* Events bracketing all device work, for timing. */
    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    /*******************/
    /** READING INPUT **/
    /*******************/
    int half_size,size;
    int sb_num; //number of sub-blocks in each half

    /* read the value of half_size from stdin */
    scanf("%d", &half_size);
    size = half_size*2;

    /* Allocate host memory */
    h_input = (int*) malloc(sizeof(int)*size);
    h_output = (int*) malloc(sizeof(int)*size);

    /* read input from stdin */
    for(i=0;i<size;++i) scanf("%d", &h_input[i]);

    /* read the value of sb_num */
    scanf("%d", &sb_num);

    int *h_sb_len_left, *h_sb_len_right;
    h_sb_len_left = (int*) malloc(sizeof(int)*sb_num);
    h_sb_len_right = (int*) malloc(sizeof(int)*sb_num);
    for(i=0;i<sb_num;++i) scanf("%d", &h_sb_len_left[i]);
    for(i=0;i<sb_num;++i) scanf("%d", &h_sb_len_right[i]);

    /******************************/
    /* allocate device memories   */
    /******************************/
    int* d_input, *d_output, *d_sb_len_left, *d_sb_len_right;
    hipMalloc(&d_input,sizeof(int)*size);
    hipMalloc(&d_output,sizeof(int)*size);
    hipMalloc(&d_sb_len_left,sizeof(int)*sb_num);
    hipMalloc(&d_sb_len_right,sizeof(int)*sb_num);

    hipEventRecord(start,0);

    /* copy input data to device */
    hipMemcpy(d_input, h_input, size*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_sb_len_left, h_sb_len_left, sb_num*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_sb_len_right, h_sb_len_right, sb_num*sizeof(int), hipMemcpyHostToDevice);

    /* invoke the kernel: sb_num blocks, SAMPLE_INTERVAL threads per block */
    hipLaunchKernelGGL(( pairwise_merge), dim3(sb_num),dim3(SAMPLE_INTERVAL), 0, 0, d_input,half_size,d_sb_len_left,d_sb_len_right,sb_num,d_output);
    checkCUDAError("kernel invocation\n");

    /* copy the sorted results back to host (blocking, so the kernel has
     * completed once this returns) */
    hipMemcpy(h_output, d_output, sizeof(int)*size, hipMemcpyDeviceToHost);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    /* BUG FIX: hipEventElapsedTime() reports milliseconds, not seconds;
     * the label previously read "(s)". */
    fprintf(stderr,"Elapsed time = %f (ms)\n",elapsedTime);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    /*******************************************/
    /* Print the final merge result            */
    /*******************************************/
    printf("The sorted array is :\n");
    for(int i=0;i<size;++i) printf("%d ",h_output[i]);
    printf("\n");

    /* free device memory */
    hipFree(d_sb_len_left);
    hipFree(d_sb_len_right);
    hipFree(d_input);
    hipFree(d_output);

    /* free host memory */
    free(h_input);
    free(h_output);
    free(h_sb_len_left);
    free(h_sb_len_right);

    return 0;
}
/*
 * Abort with a diagnostic if the most recent HIP runtime call or kernel
 * launch recorded an error; `msg' labels the call site.  Note that
 * hipGetLastError() also clears the sticky error state.
 */
void checkCUDAError(const char *msg)
{
    hipError_t status = hipGetLastError();
    if (status == hipSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
}
/*
 * Position of the first element of `arr' strictly greater than `key'
 * (for a sorted sub-block this is the number of elements <= key, i.e. the
 * rank *including* duplicates of the key).  Naive linear scan; binary search
 * would be a drop-in improvement.
 */
static inline __device__ int get_rank_inclusive(int key, int* arr, int len)
{
    int pos;
    for (pos = 0; pos < len; ++pos)
        if (arr[pos] > key)
            break;
    return pos;
}
/*
 * Position of the first element of `arr' greater than or equal to `key'
 * (for a sorted sub-block this is the number of elements < key, i.e. the
 * rank *excluding* duplicates of the key).  Naive linear scan; binary search
 * would be a drop-in improvement.
 */
static inline __device__ int get_rank_exclusive(int key, int* arr, int len)
{
    int pos;
    for (pos = 0; pos < len; ++pos)
        if (arr[pos] >= key)
            break;
    return pos;
}
| 672beb6ef6da232dabd5ab2114a06da5efd63437.cu | /*
MULTIPLE-BLOCK VERSION
Assume that all sub-blocks in each half are located
You are given 2 functions to calculate the rank of a key: get_rank_inclusive and get_rank_exclusive
The input data is read from the stdin, it contains 5 lines:
+ Line 1: half_size, the number of elements in each half (half_size = n/2)
+ Line 2: input data (half_size*2 elements)
+ Line 3: sb_num, the number of sub-blocks in each half
+ Line 4: the array sb_len_left (the length of each sub-block in the left half)
+ Line 5: the array sb_len_right (the length of each sub-block in the right half)
*/
#include <stdio.h>
#include <cuda.h>
/**/
#define SAMPLE_INTERVAL 4 /* pick a sample every 4 elements */
/**/
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (including this key)
*/
static inline __device__ int get_rank_inclusive(int key, int* arr, int len);
/*
Inline device function, to compute a rank of a "key" in an array "arr"
of length "len" (excluding this key)
*/
static inline __device__ int get_rank_exclusive(int key, int* arr, int len);
/**/
/*
 * Merge kernel: thread block b merges left sub-block b with right sub-block
 * b, writing the merged run into the corresponding span of `output'.  Each
 * thread places one key by adding its local position to its rank in the
 * opposite sub-block.  Left keys use the exclusive rank and right keys the
 * inclusive rank, so equal keys from the left land before equal keys from
 * the right (a stable merge with no output collisions).
 *
 * NOTE(review): launched with SAMPLE_INTERVAL (= 4) threads per block, so a
 * sub-block longer than blockDim.x would only be partially merged -- verify
 * the locator stage bounds sub-blocks to SAMPLE_INTERVAL elements.
 */
__global__ void pairwise_merge(int* input, int half_size, int* sb_len_left, int* sb_len_right, int sb_num, int* output)
{
    int i, other_rank, output_rank;

    int* left_half = input;
    int* right_half = input + half_size;
    int* cur_output = output;

    /* Walk past the sub-blocks handled by lower-numbered blocks to find this
     * block's input sub-blocks and its output slot. */
    for(i=0;i<blockIdx.x;++i)
    {
        left_half += sb_len_left[i];
        right_half += sb_len_right[i];
        cur_output = cur_output + sb_len_left[i] + sb_len_right[i];
    }

    if(threadIdx.x < sb_len_left[blockIdx.x])
    {
        int key = left_half[threadIdx.x];
        /* use function get_rank_exclusive() to calculate the rank
           of key in the left_right*/
        other_rank = get_rank_exclusive(key,right_half,sb_len_right[blockIdx.x]);
        /* calculate the output rank of key */
        output_rank = threadIdx.x + other_rank;
        /* assign key to the correspoding position in the output array*/
        cur_output[output_rank] = key;
    }

    /********************************************/
    if(threadIdx.x < sb_len_right[blockIdx.x])
    {
        int key = right_half[threadIdx.x];
        /* use function get_rank_inclusive() to calculate the rank
           of key in the left_half*/
        other_rank = get_rank_inclusive(key,left_half,sb_len_left[blockIdx.x]);
        /* calculate the output rank of key */
        output_rank = threadIdx.x + other_rank;
        /* assign key to the correspoding position in the output array*/
        cur_output[output_rank] = key;
    }
}/* end of the kernel*/
/**/
void checkCUDAError(const char *msg);
/**/
/*
 * Host driver: reads the problem description from stdin (five lines, see the
 * file header), runs the pairwise merge kernel with one thread block per
 * sub-block pair, and prints the merged array.  GPU elapsed time (copies +
 * kernel + copy-back) is reported on stderr.
 */
int main(int argc, char* argv[])
{
    int i;

    int* h_input, *h_output;

    /* Events bracketing all device work, for timing. */
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /*******************/
    /** READING INPUT **/
    /*******************/
    int half_size,size;
    int sb_num; //number of sub-blocks in each half

    /* read the value of half_size from stdin */
    scanf("%d", &half_size);
    size = half_size*2;

    /* Allocate host memory */
    h_input = (int*) malloc(sizeof(int)*size);
    h_output = (int*) malloc(sizeof(int)*size);

    /* read input from stdin */
    for(i=0;i<size;++i) scanf("%d", &h_input[i]);

    /* read the value of sb_num */
    scanf("%d", &sb_num);

    int *h_sb_len_left, *h_sb_len_right;
    h_sb_len_left = (int*) malloc(sizeof(int)*sb_num);
    h_sb_len_right = (int*) malloc(sizeof(int)*sb_num);
    for(i=0;i<sb_num;++i) scanf("%d", &h_sb_len_left[i]);
    for(i=0;i<sb_num;++i) scanf("%d", &h_sb_len_right[i]);

    /******************************/
    /* allocate device memories   */
    /******************************/
    int* d_input, *d_output, *d_sb_len_left, *d_sb_len_right;
    cudaMalloc(&d_input,sizeof(int)*size);
    cudaMalloc(&d_output,sizeof(int)*size);
    cudaMalloc(&d_sb_len_left,sizeof(int)*sb_num);
    cudaMalloc(&d_sb_len_right,sizeof(int)*sb_num);

    cudaEventRecord(start,0);

    /* copy input data to device */
    cudaMemcpy(d_input, h_input, size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sb_len_left, h_sb_len_left, sb_num*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sb_len_right, h_sb_len_right, sb_num*sizeof(int), cudaMemcpyHostToDevice);

    /* invoke the kernel: sb_num blocks, SAMPLE_INTERVAL threads per block */
    pairwise_merge<<<sb_num,SAMPLE_INTERVAL>>>(d_input,half_size,d_sb_len_left,d_sb_len_right,sb_num,d_output);
    checkCUDAError("kernel invocation\n");

    /* copy the sorted results back to host (blocking, so the kernel has
     * completed once this returns) */
    cudaMemcpy(h_output, d_output, sizeof(int)*size, cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    /* BUG FIX: cudaEventElapsedTime() reports milliseconds, not seconds;
     * the label previously read "(s)". */
    fprintf(stderr,"Elapsed time = %f (ms)\n",elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    /*******************************************/
    /* Print the final merge result            */
    /*******************************************/
    printf("The sorted array is :\n");
    for(int i=0;i<size;++i) printf("%d ",h_output[i]);
    printf("\n");

    /* free device memory */
    cudaFree(d_sb_len_left);
    cudaFree(d_sb_len_right);
    cudaFree(d_input);
    cudaFree(d_output);

    /* free host memory */
    free(h_input);
    free(h_output);
    free(h_sb_len_left);
    free(h_sb_len_right);

    return 0;
}
/*
 * Abort the program if the most recent CUDA runtime call or kernel launch
 * left an error behind; `msg' identifies the call site in the diagnostic.
 * Note that cudaGetLastError() also clears the sticky error state.
 */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg,
                cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
/*
 * Position of the first element of `arr' strictly greater than `key'
 * (for a sorted sub-block: the number of elements <= key, i.e. the rank
 * *including* duplicates of the key).
 * Naive implementation.
 * Binary search can be used to implement more efficient function
 */
static inline __device__ int get_rank_inclusive(int key, int* arr, int len)
{
    int rank=0;
    while((rank < len) && (arr[rank]<=key)) ++rank;
    return rank;
}
/*
 * Position of the first element of `arr' greater than or equal to `key'
 * (for a sorted sub-block: the number of elements < key, i.e. the rank
 * *excluding* duplicates of the key).
 * Naive implementation.
 * Binary search can be used to implement more efficient function
 */
static inline __device__ int get_rank_exclusive(int key, int* arr, int len)
{
    int rank=0;
    while((rank < len) && (arr[rank]<key)) ++rank;
    return rank;
}
|
71856b15b4679973854544759fab9848f60bbbc0.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "matx_pybind.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
constexpr int m = 15;
// Test fixture for the GPU determinant solver: generates a reference m x m
// input matrix "A" (plus its expected determinant) via the Python test-vector
// generator, then loads it into the device tensor Av.
template <typename T> class DetSolverTest : public ::testing::Test {
protected:
void SetUp() override
{
pb = std::make_unique<MatXPybind>();
// Run the python "00_solver"/"det" generator for an m x m problem.
pb->InitAndRunTVGenerator<T>("00_solver", "det", "run", {m});
pb->NumpyToTensorView(Av, "A");
}
void TearDown() { pb.reset(); }
std::unique_ptr<MatXPybind> pb; // bridge to the numpy reference data
tensor_t<T, 2> Av{{m, m}};  // input matrix A (row-major)
tensor_t<T, 2> Atv{{m, m}}; // scratch for the transposed (col-major) copy
tensor_t<T, 0> detv{};      // scalar determinant result
};
// Restrict the typed suite to real (non-complex, non-half) float types.
template <typename TensorType>
class DetSolverTestNonComplexFloatTypes : public DetSolverTest<TensorType> {
};
TYPED_TEST_SUITE(DetSolverTestNonComplexFloatTypes,
MatXFloatNonComplexNonHalfTypes);
// Checks det() against the numpy reference value within a 0.1 tolerance.
TYPED_TEST(DetSolverTestNonComplexFloatTypes, Determinant)
{
MATX_ENTER_HANDLER();
// cuSolver only supports col-major solving today, so we need to transpose,
// solve, then transpose again to compare to Python
transpose(this->Atv, this->Av, 0);
det(this->detv, this->Atv);
transpose(this->Av, this->Atv, 0); // Transpose back to row-major
// NOTE(review): the hipStreamSynchronize return code is discarded; a kernel
// failure would surface only via the value comparison below.
hipStreamSynchronize(0);
MATX_TEST_ASSERT_COMPARE(this->pb, this->detv, "det", 0.1);
MATX_EXIT_HANDLER();
}
| 71856b15b4679973854544759fab9848f60bbbc0.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "matx_pybind.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
constexpr int m = 15;
// Test fixture for the GPU determinant solver: generates a reference m x m
// input matrix "A" (plus its expected determinant) via the Python test-vector
// generator, then loads it into the device tensor Av.
template <typename T> class DetSolverTest : public ::testing::Test {
protected:
void SetUp() override
{
pb = std::make_unique<MatXPybind>();
// Run the python "00_solver"/"det" generator for an m x m problem.
pb->InitAndRunTVGenerator<T>("00_solver", "det", "run", {m});
pb->NumpyToTensorView(Av, "A");
}
void TearDown() { pb.reset(); }
std::unique_ptr<MatXPybind> pb; // bridge to the numpy reference data
tensor_t<T, 2> Av{{m, m}};  // input matrix A (row-major)
tensor_t<T, 2> Atv{{m, m}}; // scratch for the transposed (col-major) copy
tensor_t<T, 0> detv{};      // scalar determinant result
};
// Restrict the typed suite to real (non-complex, non-half) float types.
template <typename TensorType>
class DetSolverTestNonComplexFloatTypes : public DetSolverTest<TensorType> {
};
TYPED_TEST_SUITE(DetSolverTestNonComplexFloatTypes,
MatXFloatNonComplexNonHalfTypes);
// Checks det() against the numpy reference value within a 0.1 tolerance.
TYPED_TEST(DetSolverTestNonComplexFloatTypes, Determinant)
{
MATX_ENTER_HANDLER();
// cuSolver only supports col-major solving today, so we need to transpose,
// solve, then transpose again to compare to Python
transpose(this->Atv, this->Av, 0);
det(this->detv, this->Atv);
transpose(this->Av, this->Atv, 0); // Transpose back to row-major
// NOTE(review): the cudaStreamSynchronize return code is discarded; a kernel
// failure would surface only via the value comparison below.
cudaStreamSynchronize(0);
MATX_TEST_ASSERT_COMPARE(this->pb, this->detv, "det", 0.1);
MATX_EXIT_HANDLER();
}
|
f240dd41cf214e74df61cf4fec017d253bd89f41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
// Computes c = a * b for N x N int matrices on the device.
// Expects a 2D launch: the x dimension indexes rows, the y dimension
// indexes columns. Threads past the matrix edge exit immediately, so the
// grid may safely over-cover the problem.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return; // guard: grid is rounded up past the matrix edge

    int acc = 0; // dot product of a's row with b's column
    for (int k = 0; k < N; ++k)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
// Host reference implementation of c = a * b for N x N int matrices,
// used to validate the GPU result.
void matrixMulCPU( int * a, int * b, int * c )
{
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            int acc = 0; // dot product of a's row with b's column
            for (int k = 0; k < N; ++k) {
                acc += a[row * N + k] * b[k * N + col];
            }
            c[row * N + col] = acc;
        }
    }
}
// Driver: builds two N x N matrices in managed memory, multiplies them on
// the GPU and on the CPU, and reports the first mismatch (if any).
int main()
{
int *a, *b, *c_cpu, *c_gpu;
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
// NOTE(review): hipMallocManaged return codes are not checked; a failed
// allocation would crash in the init loop below.
hipMallocManaged (&a, size);
hipMallocManaged (&b, size);
hipMallocManaged (&c_cpu, size);
hipMallocManaged (&c_gpu, size);
// Initialize memory
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
dim3 threads_per_block (16, 16, 1); // A 16 x 16 block threads
// "+ 1" over-provisions an extra block row/column even when N divides
// evenly; the kernel's bounds check keeps the surplus threads harmless.
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu );
hipDeviceSynchronize(); // Wait for the GPU to finish before proceeding
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
hipFree(a); hipFree(b);
hipFree( c_cpu ); hipFree( c_gpu );
} | f240dd41cf214e74df61cf4fec017d253bd89f41.cu | #include <stdio.h>
#define N 64
// Computes c = a * b for N x N int matrices on the device.
// Expects a 2D launch: the x dimension indexes rows, the y dimension
// indexes columns. Threads past the matrix edge exit immediately, so the
// grid may safely over-cover the problem.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return; // guard: grid is rounded up past the matrix edge

    int acc = 0; // dot product of a's row with b's column
    for (int k = 0; k < N; ++k)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
// Host reference implementation of c = a * b for N x N int matrices,
// used to validate the GPU result.
void matrixMulCPU( int * a, int * b, int * c )
{
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            int acc = 0; // dot product of a's row with b's column
            for (int k = 0; k < N; ++k) {
                acc += a[row * N + k] * b[k * N + col];
            }
            c[row * N + col] = acc;
        }
    }
}
// Driver: builds two N x N matrices in managed memory, multiplies them on
// the GPU and on the CPU, and reports the first mismatch (if any).
int main()
{
int *a, *b, *c_cpu, *c_gpu;
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
// NOTE(review): cudaMallocManaged return codes are not checked; a failed
// allocation would crash in the init loop below.
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
dim3 threads_per_block (16, 16, 1); // A 16 x 16 block threads
// "+ 1" over-provisions an extra block row/column even when N divides
// evenly; the kernel's bounds check keeps the surplus threads harmless.
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
cudaDeviceSynchronize(); // Wait for the GPU to finish before proceeding
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
cudaFree(a); cudaFree(b);
cudaFree( c_cpu ); cudaFree( c_gpu );
} |
3ef9afb70bb7d05f8cbf7e2a362b0f401b408f90.hip | // !!! This is a file automatically generated by hipify!!!
#include "evaluator.cuh"
#include <library/cuda/wrappers/kernel.cuh>
#include <library/cuda/wrappers/kernel_helpers.cuh>
#include <library/cuda/wrappers/arch.cuh>
#include <library/cuda/wrappers/kernel_helpers.cuh>
#include <hip/hip_runtime.h>
#include <assert.h>
// Device-side accessor over a flat feature matrix.
// Layout selects the storage order: ColumnFirst reads
// FeaturesPtr[featureId * Stride + objectId] (feature-major), RowFirst reads
// FeaturesPtr[featureId + objectId * Stride] (object-major). Out-of-range
// requests return NegativeInfty() instead of touching memory, so padded
// tail threads are safe.
template<typename TFeatureType, TGPUDataInput::EFeatureLayout Layout>
struct TFeatureAccessor {
TFeatureAccessor() = default;
using TFeature = TFeatureType;
using TFeaturePtr = const TFeature*;
i32 Stride = 0;       // element distance between consecutive major-axis rows
i32 FeatureCount = 0; // number of features per object
i32 ObjectCount = 0;  // number of objects (documents)
TFeaturePtr FeaturesPtr = nullptr; // device pointer to the flat feature values
// Bounds-checked read of feature `featureId` for object `objectId`;
// __ldg routes the load through the read-only data cache.
__forceinline__ __device__ TFeature operator()(i32 featureId, i32 objectId) const {
if (Layout == TGPUDataInput::EFeatureLayout::ColumnFirst) {
return objectId < ObjectCount && featureId < FeatureCount ?
__ldg(FeaturesPtr + featureId * Stride + objectId)
: NegativeInfty();
} else {
return objectId < ObjectCount && featureId < FeatureCount ?
__ldg(FeaturesPtr + featureId + objectId * Stride)
: NegativeInfty();
}
}
__forceinline__ __device__ int FeaturesCount() const {
return FeatureCount;
}
__forceinline__ __device__ int SamplesCount() const {
return ObjectCount;
}
};
// Kernel layout tuning constants shared by the quantization and evaluation
// kernels below.
constexpr ui32 ObjectsPerThread = 4;           // documents handled per thread
constexpr ui32 TreeSubBlockWidth = 8;          // threadIdx.y extent in the eval kernel
constexpr ui32 ExtTreeBlockWidth = 128;        // scaling factor for per-slot tree ranges
constexpr ui32 QuantizationDocBlockSize = 256; // threads per block in Binarize
constexpr ui32 BlockWidth = 256;               // threads per block in EvalObliviousTrees
constexpr ui32 EvalDocBlockSize = BlockWidth / TreeSubBlockWidth; // doc-dimension threads (32)
static_assert(EvalDocBlockSize >= WarpSize, "EvalBlockSize should be greater than WarpSize");
using TTreeIndex = uint4; // per-thread leaf indexes for 4 documents
// Records the quantized-data shape and grows (never shrinks) the device
// buffer that holds the binarized feature buckets.
void TCudaQuantizedData::SetDimensions(ui32 effectiveBucketCount, ui32 objectsCount) {
ObjectsCount = objectsCount;
EffectiveBucketCount = effectiveBucketCount;
// One "32-block" holds WarpSize documents' worth of buckets; documents are
// grouped 128 (WarpSize * ObjectsPerThread) at a time, hence the / 128 * 4.
const auto one32blockSize = WarpSize * effectiveBucketCount;
const auto desiredQuantBuff = one32blockSize * NKernel::CeilDivide<ui32>(objectsCount, 128) * 4;
if (BinarizedFeaturesBuffer.Size() < desiredQuantBuff) {
BinarizedFeaturesBuffer = TCudaVec<TCudaQuantizationBucket>(desiredQuantBuff, EMemoryType::Device);
}
}
// Lazily (re)allocates the staging and result buffers used during
// evaluation. Buffers only ever grow (rounded up to 2048 elements), so
// repeated calls with the same or smaller sizes are no-ops.
void TEvaluationDataCache::PrepareCopyBufs(size_t bufSize, size_t objectsCount) {
if (CopyDataBufDevice.Size() < bufSize) {
CopyDataBufDevice = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Device);
}
if (CopyDataBufHost.Size() < bufSize) {
CopyDataBufHost = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Host);
}
if (ResultsFloatBuf.Size() < objectsCount) {
ResultsFloatBuf = TCudaVec<float>(AlignBy<2048>(objectsCount), EMemoryType::Device);
}
if (ResultsDoubleBuf.Size() < objectsCount) {
ResultsDoubleBuf = TCudaVec<double>(AlignBy<2048>(objectsCount), EMemoryType::Device);
}
}
// Quantizes one float feature (selected by blockIdx.y) for a block of
// documents: each thread bins ObjectsPerThread (4) documents, spaced
// WarpSize apart, by counting how many of the feature's borders each value
// exceeds. Output is grouped in "32-blocks" of WarpSize documents per
// bucket, the layout consumed by EvalObliviousTrees.
template<typename TFloatFeatureAccessor>
__launch_bounds__(QuantizationDocBlockSize, 1)
__global__ void Binarize(
TFloatFeatureAccessor floatAccessor,
const float* __restrict__ borders,
const ui32* __restrict__ featureBorderOffsets,
const ui32* __restrict__ featureBordersCount,
const ui32* __restrict__ floatFeatureForBucketIdx,
const ui32 bucketsCount,
TCudaQuantizationBucket* __restrict__ target
) {
const int blockby32 = blockIdx.x * QuantizationDocBlockSize / WarpSize + threadIdx.x / WarpSize;
const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + threadIdx.x % WarpSize;
const int targetBucketIdx = blockIdx.y;
const float* featureBorders = borders + featureBorderOffsets[targetBucketIdx];
const int featureBorderCount = __ldg(featureBordersCount + targetBucketIdx);
const int featureIdx = floatFeatureForBucketIdx[targetBucketIdx];
// Stage this feature's borders in shared memory, one per thread.
// NOTE(review): only the first QuantizationDocBlockSize borders can be
// staged; assumes featureBorderCount <= 256 -- confirm upstream guarantee.
__shared__ float bordersLocal[QuantizationDocBlockSize];
if (threadIdx.x < featureBorderCount) {
bordersLocal[threadIdx.x] = __ldg(featureBorders + threadIdx.x);
}
__syncthreads();
// Four documents per thread, WarpSize apart; out-of-range slots read
// NegativeInfty() from the accessor and thus pass no border.
float4 features;
features.x = floatAccessor(featureIdx, firstDocForThread + 0 * WarpSize);
features.y = floatAccessor(featureIdx, firstDocForThread + 1 * WarpSize);
features.z = floatAccessor(featureIdx, firstDocForThread + 2 * WarpSize);
features.w = floatAccessor(featureIdx, firstDocForThread + 3 * WarpSize);
TCudaQuantizationBucket bins = { 0 };
#pragma unroll 8
for (int borderId = 0; borderId < featureBorderCount; ++borderId) {
const float border = bordersLocal[borderId];
bins.x += features.x > border;
bins.y += features.y > border;
bins.z += features.z > border;
bins.w += features.w > border;
}
if (firstDocForThread < floatAccessor.SamplesCount()) {
target[bucketsCount * WarpSize * blockby32 + targetBucketIdx * WarpSize + threadIdx.x % WarpSize] = bins;
}
}
// Computes, for four documents at once, the leaf index of one oblivious
// tree of compile-time depth TreeDepth: bit `depth` of each lane is set
// when the document's quantized bucket passes that depth's split. The
// compile-time depth lets the loop unroll fully.
template<int TreeDepth>
TTreeIndex __device__ __forceinline__ CalcIndexesUnwrapped(const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) {
TTreeIndex result = { 0 };
#pragma unroll TreeDepth
for (int depth = 0; depth < TreeDepth; ++depth) {
const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth);
TCudaQuantizationBucket buckets = __ldg(quantizedFeatures + bin.FeatureIdx);
result.x |= ((buckets.x) >= bin.FeatureVal) << depth;
result.y |= ((buckets.y) >= bin.FeatureVal) << depth;
result.z |= ((buckets.z) >= bin.FeatureVal) << depth;
result.w |= ((buckets.w) >= bin.FeatureVal) << depth;
}
return result;
}
// Runtime-depth fallback of CalcIndexesUnwrapped: walks TreeDepth splits
// and packs one comparison bit per depth into each of the four document
// lanes of the returned index.
TTreeIndex __device__ CalcIndexesBase(int TreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) {
    TTreeIndex index = { 0 };
    for (int depth = 0; depth < TreeDepth; ++depth) {
        const TGPURepackedBin split = Ldg(curRepackedBinPtr + depth);
        const TCudaQuantizationBucket bucket = __ldg(quantizedFeatures + split.FeatureIdx);
        const ui32 bit = 1u << depth;
        if (bucket.x >= split.FeatureVal) index.x |= bit;
        if (bucket.y >= split.FeatureVal) index.y |= bit;
        if (bucket.z >= split.FeatureVal) index.z |= bit;
        if (bucket.w >= split.FeatureVal) index.w |= bit;
    }
    return index;
}
// Dispatches leaf-index computation to a fully unrolled, depth-specialized
// variant for the common depths (6..8); any other depth falls back to the
// generic runtime loop.
TTreeIndex __device__ __forceinline__ CalcTreeVals(int curTreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) {
    if (curTreeDepth == 6) {
        return CalcIndexesUnwrapped<6>(curRepackedBinPtr, quantizedFeatures);
    }
    if (curTreeDepth == 7) {
        return CalcIndexesUnwrapped<7>(curRepackedBinPtr, quantizedFeatures);
    }
    if (curTreeDepth == 8) {
        return CalcIndexesUnwrapped<8>(curRepackedBinPtr, quantizedFeatures);
    }
    return CalcIndexesBase(curTreeDepth, curRepackedBinPtr, quantizedFeatures);
}
// Accumulates, per document, the sum of leaf values over all oblivious
// trees. Work split: threadIdx.x / blockIdx.y cover documents (4 per
// thread, matching the Binarize layout), threadIdx.y / blockIdx.x cover
// disjoint contiguous tree ranges. Per-slot partials are reduced in shared
// memory and folded into `results` via atomic adds (results must be
// zeroed by the caller).
__launch_bounds__(BlockWidth, 1)
__global__ void EvalObliviousTrees(
const TCudaQuantizationBucket* __restrict__ quantizedFeatures,
const ui32* __restrict__ treeSizes,        // depth of each tree
const ui32 treeCount,
const ui32* __restrict__ treeStartOffsets, // per-tree offset into repackedBins
const TGPURepackedBin* __restrict__ repackedBins,
const ui32* __restrict__ firstLeafOfset,   // per-tree offset into leafValues
const ui32 bucketsCount,
const TCudaEvaluatorLeafType* __restrict__ leafValues,
const ui32 documentCount,
TCudaEvaluatorLeafType* __restrict__ results) {
const int innerBlockBy32 = threadIdx.x / WarpSize;
const int blockby32 = blockIdx.y * EvalDocBlockSize / WarpSize + innerBlockBy32;
const int inBlockId = threadIdx.x % WarpSize;
const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + inBlockId;
// Jump to this warp's "32-block" of buckets (see Binarize output layout).
quantizedFeatures += bucketsCount * WarpSize * blockby32 + threadIdx.x % WarpSize;
// Each (threadIdx.y, blockIdx.x) slot owns a contiguous range of trees.
const int firstTreeIdx = TreeSubBlockWidth * ExtTreeBlockWidth * (threadIdx.y + TreeSubBlockWidth * blockIdx.x);
const int lastTreeIdx = min(firstTreeIdx + TreeSubBlockWidth * ExtTreeBlockWidth, treeCount);
double4 localResult = { 0 };
if (firstTreeIdx < lastTreeIdx && firstDocForThread < documentCount) {
const TGPURepackedBin* __restrict__ curRepackedBinPtr = repackedBins + __ldg(treeStartOffsets + firstTreeIdx);
leafValues += firstLeafOfset[firstTreeIdx];
int treeIdx = firstTreeIdx;
// Round the range down to a multiple of 4 for the paired loop below
// (a multiple of 4 is in particular a multiple of 2).
const int lastTreeBy2 = lastTreeIdx - ((lastTreeIdx - firstTreeIdx) & 0x3);
// Main loop: two trees per iteration, walking the split and leaf arrays
// in lock-step (order-sensitive pointer arithmetic).
for (; treeIdx < lastTreeBy2; treeIdx += 2) {
const int curTreeDepth1 = __ldg(treeSizes + treeIdx);
const int curTreeDepth2 = __ldg(treeSizes + treeIdx + 1);
const TTreeIndex bins1 = CalcTreeVals(curTreeDepth1, curRepackedBinPtr, quantizedFeatures);
const TTreeIndex bins2 = CalcTreeVals(curTreeDepth2, curRepackedBinPtr + curTreeDepth1, quantizedFeatures);
const auto leafValues2 = leafValues + (1 << curTreeDepth1);
localResult.x += __ldg(leafValues + bins1.x) + __ldg(leafValues2 + bins2.x);
localResult.y += __ldg(leafValues + bins1.y) + __ldg(leafValues2 + bins2.y);
localResult.z += __ldg(leafValues + bins1.z) + __ldg(leafValues2 + bins2.z);
localResult.w += __ldg(leafValues + bins1.w) + __ldg(leafValues2 + bins2.w);
curRepackedBinPtr += curTreeDepth1 + curTreeDepth2;
leafValues = leafValues2 + (1 << curTreeDepth2);
}
// Tail: remaining trees one at a time.
for (; treeIdx < lastTreeIdx; ++treeIdx) {
const int curTreeDepth = __ldg(treeSizes + treeIdx);
const TTreeIndex bins = CalcTreeVals(curTreeDepth, curRepackedBinPtr, quantizedFeatures);
localResult.x += __ldg(leafValues + bins.x);
localResult.y += __ldg(leafValues + bins.y);
localResult.z += __ldg(leafValues + bins.z);
localResult.w += __ldg(leafValues + bins.w);
curRepackedBinPtr += curTreeDepth;
leafValues += (1 << curTreeDepth);
}
}
// TODO(kirillovs): reduce code is valid if those conditions met
static_assert(EvalDocBlockSize * ObjectsPerThread == 128, "");
static_assert(EvalDocBlockSize == 32, "");
// Stage every slot's 128 per-document partials, then collapse the
// TreeSubBlockWidth slots down to per-document sums in shared memory.
__shared__ TCudaEvaluatorLeafType reduceVals[EvalDocBlockSize * ObjectsPerThread * TreeSubBlockWidth];
reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 0 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.x;
reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 1 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.y;
reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 2 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.z;
reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 3 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.w;
__syncthreads();
TCudaEvaluatorLeafType lr = reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize];
for (int i = 256; i < 256 * 4; i += 256) {
lr += reduceVals[i + threadIdx.x + threadIdx.y * EvalDocBlockSize];
}
reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] = lr;
__syncthreads();
// First ObjectsPerThread rows of threads publish the final 128 sums.
if (threadIdx.y < ObjectsPerThread) {
TAtomicAdd<TCudaEvaluatorLeafType>::Add(
results + blockby32 * WarpSize * ObjectsPerThread + threadIdx.x + threadIdx.y * EvalDocBlockSize,
reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] + reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize + 128]
);
}
}
// Converts raw per-document scores into the requested prediction type and
// widens float -> double into the output buffer. Written for a single-block
// launch: resultId strides by blockDim.x only.
template<NCB::NModelEvaluation::EPredictionType PredictionType, bool OneDimension>
__global__ void ProcessResults(
const float* __restrict__ rawResults,
ui32 resultsSize,
double* hostMemResults,
ui32 approxDimension
) {
for (ui32 resultId = threadIdx.x; resultId < resultsSize; resultId += blockDim.x) {
if (PredictionType == NCB::NModelEvaluation::EPredictionType::RawFormulaVal) {
hostMemResults[resultId] = __ldg(rawResults + resultId);
} else if (PredictionType == NCB::NModelEvaluation::EPredictionType::Probability) {
if (OneDimension) {
// Binary classification: sigmoid of the raw score.
hostMemResults[resultId] = 1 / (1 + exp(-__ldg(rawResults + resultId)));
} else {
// TODO(kirillovs): write softmax
assert(0);
}
} else {
if (OneDimension) {
hostMemResults[resultId] = __ldg(rawResults + resultId) > 0;
} else {
// Multi-class: argmax over approxDimension raw values.
// NOTE(review): rawResults is advanced once per iteration of this
// thread's own loop, which only lines up with resultId when a
// single thread processes consecutive results; looks wrong for
// blockDim.x > 1 -- verify before enabling multi-class here.
float maxVal = __ldg(rawResults);
ui32 maxPos = 0;
for (ui32 dim = 1; dim < approxDimension; ++dim) {
const float val = __ldg(rawResults + dim);
if (val > maxVal) {
maxVal = val;
maxPos = dim;
}
}
hostMemResults[resultId] = maxPos;
rawResults += approxDimension;
}
}
}
}
// Runs the oblivious-tree evaluation kernel over already-quantized data,
// then post-processes the raw sums into `result` according to
// predictionType, all on `Stream`.
// NOTE(review): treeStart/treeEnd are accepted but never forwarded -- the
// kernel always evaluates every tree in the model; confirm intent.
void TGPUCatboostEvaluationContext::EvalQuantizedData(
const TCudaQuantizedData* data,
size_t treeStart,
size_t treeEnd,
TArrayRef<double> result,
NCB::NModelEvaluation::EPredictionType predictionType
) const {
// x dimension covers tree ranges, y dimension covers document groups.
const dim3 treeCalcDimBlock(EvalDocBlockSize, TreeSubBlockWidth);
const dim3 treeCalcDimGrid(
NKernel::CeilDivide<unsigned int>(GPUModelData.TreeSizes.Size(), TreeSubBlockWidth * ExtTreeBlockWidth),
NKernel::CeilDivide<unsigned int>(data->GetObjectsCount(), EvalDocBlockSize * ObjectsPerThread)
);
// The eval kernel accumulates with atomic adds, so zero the target first.
ClearMemoryAsync(EvalDataCache.ResultsFloatBuf.AsArrayRef(), Stream);
hipLaunchKernelGGL(( EvalObliviousTrees), dim3(treeCalcDimGrid), dim3(treeCalcDimBlock), 0, Stream,
data->BinarizedFeaturesBuffer.Get(),
GPUModelData.TreeSizes.Get(),
GPUModelData.TreeSizes.Size(),
GPUModelData.TreeStartOffsets.Get(),
GPUModelData.TreeSplits.Get(),
GPUModelData.TreeFirstLeafOffsets.Get(),
GPUModelData.FloatFeatureForBucketIdx.Size(),
GPUModelData.ModelLeafs.Get(),
data->GetObjectsCount(),
EvalDataCache.ResultsFloatBuf.Get()
);
switch (predictionType) {
case NCB::NModelEvaluation::EPredictionType::RawFormulaVal:
hipLaunchKernelGGL(( ProcessResults<NCB::NModelEvaluation::EPredictionType::RawFormulaVal, true>), dim3(1), dim3(256), 0, Stream,
EvalDataCache.ResultsFloatBuf.Get(),
data->GetObjectsCount(),
EvalDataCache.ResultsDoubleBuf.Get(),
1
);
break;
case NCB::NModelEvaluation::EPredictionType::Exponent:
ythrow yexception() << "Unimplemented on GPU";
break;
case NCB::NModelEvaluation::EPredictionType::Probability:
hipLaunchKernelGGL(( ProcessResults<NCB::NModelEvaluation::EPredictionType::Probability, true>), dim3(1), dim3(256), 0, Stream,
EvalDataCache.ResultsFloatBuf.Get(),
data->GetObjectsCount(),
EvalDataCache.ResultsDoubleBuf.Get(),
1
);
break;
case NCB::NModelEvaluation::EPredictionType::Class:
hipLaunchKernelGGL(( ProcessResults<NCB::NModelEvaluation::EPredictionType::Class, true>), dim3(1), dim3(256), 0, Stream,
EvalDataCache.ResultsFloatBuf.Get(),
data->GetObjectsCount(),
EvalDataCache.ResultsDoubleBuf.Get(),
1
);
break;
}
// Copy the widened results back to the caller's buffer on the same stream.
MemoryCopyAsync<double>(EvalDataCache.ResultsDoubleBuf.Slice(0, data->GetObjectsCount()), result, Stream);
}
// Launches the Binarize kernel over every float-feature bucket of the
// model. The two branches are identical except for the accessor's layout
// template argument (column-first vs row-first input).
void TGPUCatboostEvaluationContext::QuantizeData(const TGPUDataInput& dataInput, TCudaQuantizedData* quantizedData) const{
const dim3 quantizationDimBlock(QuantizationDocBlockSize, 1);
// grid.x covers documents, grid.y covers the model's float-feature buckets.
const dim3 quantizationDimGrid(
NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, QuantizationDocBlockSize * ObjectsPerThread),
GPUModelData.BordersCount.Size() // float features from models
);
if (dataInput.FloatFeatureLayout == TGPUDataInput::EFeatureLayout::ColumnFirst) {
TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::ColumnFirst> floatFeatureAccessor;
floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount;
floatFeatureAccessor.Stride = dataInput.Stride;
floatFeatureAccessor.ObjectCount = dataInput.ObjectCount;
floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.data();
hipLaunchKernelGGL(( Binarize), dim3(quantizationDimGrid), dim3(quantizationDimBlock), 0, Stream,
floatFeatureAccessor,
GPUModelData.FlatBordersVector.Get(),
GPUModelData.BordersOffsets.Get(),
GPUModelData.BordersCount.Get(),
GPUModelData.FloatFeatureForBucketIdx.Get(),
GPUModelData.FloatFeatureForBucketIdx.Size(),
quantizedData->BinarizedFeaturesBuffer.Get()
);
} else {
TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::RowFirst> floatFeatureAccessor;
floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount;
floatFeatureAccessor.ObjectCount = dataInput.ObjectCount;
floatFeatureAccessor.Stride = dataInput.Stride;
floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.data();
hipLaunchKernelGGL(( Binarize), dim3(quantizationDimGrid), dim3(quantizationDimBlock), 0, Stream,
floatFeatureAccessor,
GPUModelData.FlatBordersVector.Get(),
GPUModelData.BordersOffsets.Get(),
GPUModelData.BordersCount.Get(),
GPUModelData.FloatFeatureForBucketIdx.Get(),
GPUModelData.FloatFeatureForBucketIdx.Size(),
quantizedData->BinarizedFeaturesBuffer.Get()
);
}
}
// Scores raw (non-quantized) input end to end: sizes a scratch quantized
// buffer, binarizes the float features on device, then evaluates the trees
// into `result`.
void TGPUCatboostEvaluationContext::EvalData(
    const TGPUDataInput& dataInput,
    size_t treeStart,
    size_t treeEnd,
    TArrayRef<double> result,
    NCB::NModelEvaluation::EPredictionType predictionType) const {
    // One bucket per model float feature; one slot per input object.
    const auto effectiveBucketCount = GPUModelData.FloatFeatureForBucketIdx.Size();
    TCudaQuantizedData quantizedData;
    quantizedData.SetDimensions(effectiveBucketCount, dataInput.ObjectCount);
    QuantizeData(dataInput, &quantizedData);
    EvalQuantizedData(&quantizedData, treeStart, treeEnd, result, predictionType);
}
| 3ef9afb70bb7d05f8cbf7e2a362b0f401b408f90.cu | #include "evaluator.cuh"
#include <library/cuda/wrappers/kernel.cuh>
#include <library/cuda/wrappers/kernel_helpers.cuh>
#include <library/cuda/wrappers/arch.cuh>
#include <library/cuda/wrappers/kernel_helpers.cuh>
#include <cuda_runtime.h>
#include <assert.h>
// Device-side accessor over a flat feature matrix.
// Layout selects the storage order: ColumnFirst reads
// FeaturesPtr[featureId * Stride + objectId] (feature-major), RowFirst reads
// FeaturesPtr[featureId + objectId * Stride] (object-major). Out-of-range
// requests return NegativeInfty() instead of touching memory, so padded
// tail threads are safe.
template<typename TFeatureType, TGPUDataInput::EFeatureLayout Layout>
struct TFeatureAccessor {
TFeatureAccessor() = default;
using TFeature = TFeatureType;
using TFeaturePtr = const TFeature*;
i32 Stride = 0;       // element distance between consecutive major-axis rows
i32 FeatureCount = 0; // number of features per object
i32 ObjectCount = 0;  // number of objects (documents)
TFeaturePtr FeaturesPtr = nullptr; // device pointer to the flat feature values
// Bounds-checked read of feature `featureId` for object `objectId`;
// __ldg routes the load through the read-only data cache.
__forceinline__ __device__ TFeature operator()(i32 featureId, i32 objectId) const {
if (Layout == TGPUDataInput::EFeatureLayout::ColumnFirst) {
return objectId < ObjectCount && featureId < FeatureCount ?
__ldg(FeaturesPtr + featureId * Stride + objectId)
: NegativeInfty();
} else {
return objectId < ObjectCount && featureId < FeatureCount ?
__ldg(FeaturesPtr + featureId + objectId * Stride)
: NegativeInfty();
}
}
__forceinline__ __device__ int FeaturesCount() const {
return FeatureCount;
}
__forceinline__ __device__ int SamplesCount() const {
return ObjectCount;
}
};
// Kernel layout tuning constants shared by the quantization and evaluation
// kernels below.
constexpr ui32 ObjectsPerThread = 4;           // documents handled per thread
constexpr ui32 TreeSubBlockWidth = 8;          // threadIdx.y extent in the eval kernel
constexpr ui32 ExtTreeBlockWidth = 128;        // scaling factor for per-slot tree ranges
constexpr ui32 QuantizationDocBlockSize = 256; // threads per block in Binarize
constexpr ui32 BlockWidth = 256;               // threads per block in EvalObliviousTrees
constexpr ui32 EvalDocBlockSize = BlockWidth / TreeSubBlockWidth; // doc-dimension threads (32)
static_assert(EvalDocBlockSize >= WarpSize, "EvalBlockSize should be greater than WarpSize");
using TTreeIndex = uint4; // per-thread leaf indexes for 4 documents
// Records the quantized-data shape and grows (never shrinks) the device
// buffer that holds the binarized feature buckets.
void TCudaQuantizedData::SetDimensions(ui32 effectiveBucketCount, ui32 objectsCount) {
ObjectsCount = objectsCount;
EffectiveBucketCount = effectiveBucketCount;
// One "32-block" holds WarpSize documents' worth of buckets; documents are
// grouped 128 (WarpSize * ObjectsPerThread) at a time, hence the / 128 * 4.
const auto one32blockSize = WarpSize * effectiveBucketCount;
const auto desiredQuantBuff = one32blockSize * NKernel::CeilDivide<ui32>(objectsCount, 128) * 4;
if (BinarizedFeaturesBuffer.Size() < desiredQuantBuff) {
BinarizedFeaturesBuffer = TCudaVec<TCudaQuantizationBucket>(desiredQuantBuff, EMemoryType::Device);
}
}
// Lazily (re)allocates the staging and result buffers used during
// evaluation. Buffers only ever grow (rounded up to 2048 elements), so
// repeated calls with the same or smaller sizes are no-ops.
void TEvaluationDataCache::PrepareCopyBufs(size_t bufSize, size_t objectsCount) {
if (CopyDataBufDevice.Size() < bufSize) {
CopyDataBufDevice = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Device);
}
if (CopyDataBufHost.Size() < bufSize) {
CopyDataBufHost = TCudaVec<float>(AlignBy<2048>(bufSize), EMemoryType::Host);
}
if (ResultsFloatBuf.Size() < objectsCount) {
ResultsFloatBuf = TCudaVec<float>(AlignBy<2048>(objectsCount), EMemoryType::Device);
}
if (ResultsDoubleBuf.Size() < objectsCount) {
ResultsDoubleBuf = TCudaVec<double>(AlignBy<2048>(objectsCount), EMemoryType::Device);
}
}
// Quantizes one float feature (selected by blockIdx.y) for a block of
// documents: each thread bins ObjectsPerThread (4) documents, spaced
// WarpSize apart, by counting how many of the feature's borders each value
// exceeds. Output is grouped in "32-blocks" of WarpSize documents per
// bucket, the layout consumed by EvalObliviousTrees.
template<typename TFloatFeatureAccessor>
__launch_bounds__(QuantizationDocBlockSize, 1)
__global__ void Binarize(
TFloatFeatureAccessor floatAccessor,
const float* __restrict__ borders,
const ui32* __restrict__ featureBorderOffsets,
const ui32* __restrict__ featureBordersCount,
const ui32* __restrict__ floatFeatureForBucketIdx,
const ui32 bucketsCount,
TCudaQuantizationBucket* __restrict__ target
) {
const int blockby32 = blockIdx.x * QuantizationDocBlockSize / WarpSize + threadIdx.x / WarpSize;
const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + threadIdx.x % WarpSize;
const int targetBucketIdx = blockIdx.y;
const float* featureBorders = borders + featureBorderOffsets[targetBucketIdx];
const int featureBorderCount = __ldg(featureBordersCount + targetBucketIdx);
const int featureIdx = floatFeatureForBucketIdx[targetBucketIdx];
// Stage this feature's borders in shared memory, one per thread.
// NOTE(review): only the first QuantizationDocBlockSize borders can be
// staged; assumes featureBorderCount <= 256 -- confirm upstream guarantee.
__shared__ float bordersLocal[QuantizationDocBlockSize];
if (threadIdx.x < featureBorderCount) {
bordersLocal[threadIdx.x] = __ldg(featureBorders + threadIdx.x);
}
__syncthreads();
// Four documents per thread, WarpSize apart; out-of-range slots read
// NegativeInfty() from the accessor and thus pass no border.
float4 features;
features.x = floatAccessor(featureIdx, firstDocForThread + 0 * WarpSize);
features.y = floatAccessor(featureIdx, firstDocForThread + 1 * WarpSize);
features.z = floatAccessor(featureIdx, firstDocForThread + 2 * WarpSize);
features.w = floatAccessor(featureIdx, firstDocForThread + 3 * WarpSize);
TCudaQuantizationBucket bins = { 0 };
#pragma unroll 8
for (int borderId = 0; borderId < featureBorderCount; ++borderId) {
const float border = bordersLocal[borderId];
bins.x += features.x > border;
bins.y += features.y > border;
bins.z += features.z > border;
bins.w += features.w > border;
}
if (firstDocForThread < floatAccessor.SamplesCount()) {
target[bucketsCount * WarpSize * blockby32 + targetBucketIdx * WarpSize + threadIdx.x % WarpSize] = bins;
}
}
// Computes, for four documents at once, the leaf index of one oblivious
// tree of compile-time depth TreeDepth: bit `depth` of each lane is set
// when the document's quantized bucket passes that depth's split. The
// compile-time depth lets the loop unroll fully.
template<int TreeDepth>
TTreeIndex __device__ __forceinline__ CalcIndexesUnwrapped(const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) {
TTreeIndex result = { 0 };
#pragma unroll TreeDepth
for (int depth = 0; depth < TreeDepth; ++depth) {
const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth);
TCudaQuantizationBucket buckets = __ldg(quantizedFeatures + bin.FeatureIdx);
result.x |= ((buckets.x) >= bin.FeatureVal) << depth;
result.y |= ((buckets.y) >= bin.FeatureVal) << depth;
result.z |= ((buckets.z) >= bin.FeatureVal) << depth;
result.w |= ((buckets.w) >= bin.FeatureVal) << depth;
}
return result;
}
// Runtime-depth fallback of CalcIndexesUnwrapped: walks TreeDepth splits
// and packs one comparison bit per depth into each of the four document
// lanes of the returned index.
TTreeIndex __device__ CalcIndexesBase(int TreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) {
    TTreeIndex index = { 0 };
    for (int depth = 0; depth < TreeDepth; ++depth) {
        const TGPURepackedBin split = Ldg(curRepackedBinPtr + depth);
        const TCudaQuantizationBucket bucket = __ldg(quantizedFeatures + split.FeatureIdx);
        const ui32 bit = 1u << depth;
        if (bucket.x >= split.FeatureVal) index.x |= bit;
        if (bucket.y >= split.FeatureVal) index.y |= bit;
        if (bucket.z >= split.FeatureVal) index.z |= bit;
        if (bucket.w >= split.FeatureVal) index.w |= bit;
    }
    return index;
}
// Dispatches leaf-index computation to a fully unrolled, depth-specialized
// variant for the common depths (6..8); any other depth falls back to the
// generic runtime loop.
TTreeIndex __device__ __forceinline__ CalcTreeVals(int curTreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) {
    if (curTreeDepth == 6) {
        return CalcIndexesUnwrapped<6>(curRepackedBinPtr, quantizedFeatures);
    }
    if (curTreeDepth == 7) {
        return CalcIndexesUnwrapped<7>(curRepackedBinPtr, quantizedFeatures);
    }
    if (curTreeDepth == 8) {
        return CalcIndexesUnwrapped<8>(curRepackedBinPtr, quantizedFeatures);
    }
    return CalcIndexesBase(curTreeDepth, curRepackedBinPtr, quantizedFeatures);
}
// Evaluates a pack of oblivious decision trees for a block of documents and
// atomically accumulates the per-document scores into `results`.
//
// Launch layout (inferred from the index math -- confirm against the host code
// in EvalQuantizedData): blockDim = (EvalDocBlockSize, TreeSubBlockWidth),
// gridDim.x = tree sub-blocks, gridDim.y = document blocks. Each thread walks
// up to TreeSubBlockWidth * ExtTreeBlockWidth trees for ObjectsPerThread (== 4,
// one per component of `localResult`) documents.
__launch_bounds__(BlockWidth, 1)
__global__ void EvalObliviousTrees(
    const TCudaQuantizationBucket* __restrict__ quantizedFeatures, // bucket values, grouped per 32-document chunk
    const ui32* __restrict__ treeSizes,         // depth of every tree
    const ui32 treeCount,
    const ui32* __restrict__ treeStartOffsets,  // offset of each tree's splits inside repackedBins
    const TGPURepackedBin* __restrict__ repackedBins,
    const ui32* __restrict__ firstLeafOfset,    // first leaf index per tree (identifier typo kept: renaming would break callers)
    const ui32 bucketsCount,
    const TCudaEvaluatorLeafType* __restrict__ leafValues,
    const ui32 documentCount,
    TCudaEvaluatorLeafType* __restrict__ results) {
    const int innerBlockBy32 = threadIdx.x / WarpSize;  // warp index inside the document block
    const int blockby32 = blockIdx.y * EvalDocBlockSize / WarpSize + innerBlockBy32; // global 32-document group id
    const int inBlockId = threadIdx.x % WarpSize;       // lane id within the warp
    const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + inBlockId;
    // Advance to this 32-document group's feature columns; lane picks its document.
    quantizedFeatures += bucketsCount * WarpSize * blockby32 + threadIdx.x % WarpSize;
    // Contiguous range of trees this thread is responsible for.
    const int firstTreeIdx = TreeSubBlockWidth * ExtTreeBlockWidth * (threadIdx.y + TreeSubBlockWidth * blockIdx.x);
    const int lastTreeIdx = min(firstTreeIdx + TreeSubBlockWidth * ExtTreeBlockWidth, treeCount);
    double4 localResult = { 0 };  // 4 partial sums: one per document handled by this thread
    if (firstTreeIdx < lastTreeIdx && firstDocForThread < documentCount) {
        const TGPURepackedBin* __restrict__ curRepackedBinPtr = repackedBins + __ldg(treeStartOffsets + firstTreeIdx);
        leafValues += firstLeafOfset[firstTreeIdx];
        int treeIdx = firstTreeIdx;
        // Main loop processes trees two at a time to overlap the leaf loads.
        // NOTE(review): the remainder mask is & 0x3 while the step is 2, so up
        // to 3 trailing trees fall through to the tail loop -- correct, just
        // less unrolled than the mask suggests; confirm whether & 0x1 was meant.
        const int lastTreeBy2 = lastTreeIdx - ((lastTreeIdx - firstTreeIdx) & 0x3);
        for (; treeIdx < lastTreeBy2; treeIdx += 2) {
            const int curTreeDepth1 = __ldg(treeSizes + treeIdx);
            const int curTreeDepth2 = __ldg(treeSizes + treeIdx + 1);
            const TTreeIndex bins1 = CalcTreeVals(curTreeDepth1, curRepackedBinPtr, quantizedFeatures);
            const TTreeIndex bins2 = CalcTreeVals(curTreeDepth2, curRepackedBinPtr + curTreeDepth1, quantizedFeatures);
            // Each oblivious tree of depth d owns 2^d consecutive leaf values.
            const auto leafValues2 = leafValues + (1 << curTreeDepth1);
            localResult.x += __ldg(leafValues + bins1.x) + __ldg(leafValues2 + bins2.x);
            localResult.y += __ldg(leafValues + bins1.y) + __ldg(leafValues2 + bins2.y);
            localResult.z += __ldg(leafValues + bins1.z) + __ldg(leafValues2 + bins2.z);
            localResult.w += __ldg(leafValues + bins1.w) + __ldg(leafValues2 + bins2.w);
            curRepackedBinPtr += curTreeDepth1 + curTreeDepth2;
            leafValues = leafValues2 + (1 << curTreeDepth2);
        }
        // Tail: remaining trees one at a time.
        for (; treeIdx < lastTreeIdx; ++treeIdx) {
            const int curTreeDepth = __ldg(treeSizes + treeIdx);
            const TTreeIndex bins = CalcTreeVals(curTreeDepth, curRepackedBinPtr, quantizedFeatures);
            localResult.x += __ldg(leafValues + bins.x);
            localResult.y += __ldg(leafValues + bins.y);
            localResult.z += __ldg(leafValues + bins.z);
            localResult.w += __ldg(leafValues + bins.w);
            curRepackedBinPtr += curTreeDepth;
            leafValues += (1 << curTreeDepth);
        }
    }
    // TODO(kirillovs): reduce code is valid if those conditions met
    static_assert(EvalDocBlockSize * ObjectsPerThread == 128, "");
    static_assert(EvalDocBlockSize == 32, "");
    // Block-wide reduction over the tree sub-blocks (threadIdx.y dimension).
    // Layout: one 128-value stripe of per-document partial sums per y-slice.
    __shared__ TCudaEvaluatorLeafType reduceVals[EvalDocBlockSize * ObjectsPerThread * TreeSubBlockWidth];
    reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 0 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.x;
    reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 1 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.y;
    reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 2 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.z;
    reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 3 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.w;
    __syncthreads();
    // Each thread folds 4 stripes of 256 values into its own slot.
    // NOTE(review): the constants 256 and 128 below hard-code
    // TreeSubBlockWidth == 8 on top of the static_asserts above -- confirm.
    TCudaEvaluatorLeafType lr = reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize];
    for (int i = 256; i < 256 * 4; i += 256) {
        lr += reduceVals[i + threadIdx.x + threadIdx.y * EvalDocBlockSize];
    }
    reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] = lr;
    __syncthreads();
    // First ObjectsPerThread rows of threads publish the final per-document sums.
    // Atomic because several tree sub-blocks (gridDim.x) target the same document.
    if (threadIdx.y < ObjectsPerThread) {
        TAtomicAdd<TCudaEvaluatorLeafType>::Add(
            results + blockby32 * WarpSize * ObjectsPerThread + threadIdx.x + threadIdx.y * EvalDocBlockSize,
            reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] + reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize + 128]
        );
    }
}
// Converts raw per-document model scores into the requested prediction type,
// writing doubles into `hostMemResults` (host-mapped memory).
//
// Single-block launch: `resultId` strides by blockDim.x. For the multi-class
// Class case, `resultId` indexes documents and `rawResults` must hold
// resultsSize * approxDimension floats laid out row-major per document.
template<NCB::NModelEvaluation::EPredictionType PredictionType, bool OneDimension>
__global__ void ProcessResults(
    const float* __restrict__ rawResults,
    ui32 resultsSize,
    double* hostMemResults,
    ui32 approxDimension
) {
    for (ui32 resultId = threadIdx.x; resultId < resultsSize; resultId += blockDim.x) {
        if (PredictionType == NCB::NModelEvaluation::EPredictionType::RawFormulaVal) {
            hostMemResults[resultId] = __ldg(rawResults + resultId);
        } else if (PredictionType == NCB::NModelEvaluation::EPredictionType::Probability) {
            if (OneDimension) {
                // Binary classification: sigmoid of the raw score.
                hostMemResults[resultId] = 1 / (1 + exp(-__ldg(rawResults + resultId)));
            } else {
                // TODO(kirillovs): write softmax
                assert(0);
            }
        } else {  // EPredictionType::Class
            if (OneDimension) {
                hostMemResults[resultId] = __ldg(rawResults + resultId) > 0;
            } else {
                // Argmax over this document's row of approxDimension scores.
                // BUGFIX: previous code advanced the shared `rawResults`
                // pointer by one row per loop iteration, so every thread
                // scanned rows 0, 1, 2, ... instead of the row belonging to
                // its assigned resultId (wrong as soon as blockDim.x > 1 or a
                // thread handles more than one result). Index by resultId.
                const float* row = rawResults + static_cast<size_t>(resultId) * approxDimension;
                float maxVal = __ldg(row);
                ui32 maxPos = 0;
                for (ui32 dim = 1; dim < approxDimension; ++dim) {
                    const float val = __ldg(row + dim);
                    if (val > maxVal) {
                        maxVal = val;
                        maxPos = dim;
                    }
                }
                hostMemResults[resultId] = maxPos;
            }
        }
    }
}
// Runs the oblivious-tree evaluation kernel over already-quantized data and
// post-processes the raw scores into `result` on stream `Stream`.
//
// NOTE(review): `treeStart` and `treeEnd` are accepted but never used -- the
// kernel always evaluates all GPUModelData.TreeSizes.Size() trees. Confirm
// whether partial-model evaluation is supposed to be supported here.
void TGPUCatboostEvaluationContext::EvalQuantizedData(
    const TCudaQuantizedData* data,
    size_t treeStart,
    size_t treeEnd,
    TArrayRef<double> result,
    NCB::NModelEvaluation::EPredictionType predictionType
) const {
    // Block: (documents, tree sub-blocks); grid: (tree blocks, document blocks).
    const dim3 treeCalcDimBlock(EvalDocBlockSize, TreeSubBlockWidth);
    const dim3 treeCalcDimGrid(
        NKernel::CeilDivide<unsigned int>(GPUModelData.TreeSizes.Size(), TreeSubBlockWidth * ExtTreeBlockWidth),
        NKernel::CeilDivide<unsigned int>(data->GetObjectsCount(), EvalDocBlockSize * ObjectsPerThread)
    );
    // Scores are accumulated with atomic adds, so the buffer must start at zero.
    ClearMemoryAsync(EvalDataCache.ResultsFloatBuf.AsArrayRef(), Stream);
    EvalObliviousTrees<<<treeCalcDimGrid, treeCalcDimBlock, 0, Stream>>> (
        data->BinarizedFeaturesBuffer.Get(),
        GPUModelData.TreeSizes.Get(),
        GPUModelData.TreeSizes.Size(),
        GPUModelData.TreeStartOffsets.Get(),
        GPUModelData.TreeSplits.Get(),
        GPUModelData.TreeFirstLeafOffsets.Get(),
        GPUModelData.FloatFeatureForBucketIdx.Size(),
        GPUModelData.ModelLeafs.Get(),
        data->GetObjectsCount(),
        EvalDataCache.ResultsFloatBuf.Get()
    );
    // Single-block post-processing kernel; template parameter selects the
    // transform, `true` = one-dimensional (single-approx) model.
    switch (predictionType) {
    case NCB::NModelEvaluation::EPredictionType::RawFormulaVal:
        ProcessResults<NCB::NModelEvaluation::EPredictionType::RawFormulaVal, true><<<1, 256, 0, Stream>>> (
            EvalDataCache.ResultsFloatBuf.Get(),
            data->GetObjectsCount(),
            EvalDataCache.ResultsDoubleBuf.Get(),
            1
        );
        break;
    case NCB::NModelEvaluation::EPredictionType::Exponent:
        ythrow yexception() << "Unimplemented on GPU";
        break;
    case NCB::NModelEvaluation::EPredictionType::Probability:
        ProcessResults<NCB::NModelEvaluation::EPredictionType::Probability, true><<<1, 256, 0, Stream>>> (
            EvalDataCache.ResultsFloatBuf.Get(),
            data->GetObjectsCount(),
            EvalDataCache.ResultsDoubleBuf.Get(),
            1
        );
        break;
    case NCB::NModelEvaluation::EPredictionType::Class:
        ProcessResults<NCB::NModelEvaluation::EPredictionType::Class, true><<<1, 256, 0, Stream>>> (
            EvalDataCache.ResultsFloatBuf.Get(),
            data->GetObjectsCount(),
            EvalDataCache.ResultsDoubleBuf.Get(),
            1
        );
        break;
    }
    // Async copy; caller is expected to synchronize `Stream` before reading `result`.
    MemoryCopyAsync<double>(EvalDataCache.ResultsDoubleBuf.Slice(0, data->GetObjectsCount()), result, Stream);
}
// Quantizes the raw float features of `dataInput` into per-feature buckets,
// writing the result into `quantizedData`. The input layout (column-major vs.
// row-major) selects the accessor specialization the kernel reads through.
void TGPUCatboostEvaluationContext::QuantizeData(const TGPUDataInput& dataInput, TCudaQuantizedData* quantizedData) const{
    // Grid x: document blocks; grid y: one row per model float feature.
    const dim3 launchBlock(QuantizationDocBlockSize, 1);
    const dim3 launchGrid(
        NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, QuantizationDocBlockSize * ObjectsPerThread),
        GPUModelData.BordersCount.Size() // float features from models
    );
    if (dataInput.FloatFeatureLayout == TGPUDataInput::EFeatureLayout::ColumnFirst) {
        TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::ColumnFirst> accessor;
        accessor.FeaturesPtr = dataInput.FlatFloatsVector.data();
        accessor.FeatureCount = dataInput.FloatFeatureCount;
        accessor.ObjectCount = dataInput.ObjectCount;
        accessor.Stride = dataInput.Stride;
        Binarize<<<launchGrid, launchBlock, 0, Stream>>> (
            accessor,
            GPUModelData.FlatBordersVector.Get(),
            GPUModelData.BordersOffsets.Get(),
            GPUModelData.BordersCount.Get(),
            GPUModelData.FloatFeatureForBucketIdx.Get(),
            GPUModelData.FloatFeatureForBucketIdx.Size(),
            quantizedData->BinarizedFeaturesBuffer.Get()
        );
    } else {
        TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::RowFirst> accessor;
        accessor.FeaturesPtr = dataInput.FlatFloatsVector.data();
        accessor.FeatureCount = dataInput.FloatFeatureCount;
        accessor.ObjectCount = dataInput.ObjectCount;
        accessor.Stride = dataInput.Stride;
        Binarize<<<launchGrid, launchBlock, 0, Stream>>> (
            accessor,
            GPUModelData.FlatBordersVector.Get(),
            GPUModelData.BordersOffsets.Get(),
            GPUModelData.BordersCount.Get(),
            GPUModelData.FloatFeatureForBucketIdx.Get(),
            GPUModelData.FloatFeatureForBucketIdx.Size(),
            quantizedData->BinarizedFeaturesBuffer.Get()
        );
    }
}
// Convenience entry point: quantize the raw input on the GPU, then evaluate
// the requested tree range on the quantized data.
void TGPUCatboostEvaluationContext::EvalData(
    const TGPUDataInput& dataInput,
    size_t treeStart,
    size_t treeEnd,
    TArrayRef<double> result,
    NCB::NModelEvaluation::EPredictionType predictionType) const {
    TCudaQuantizedData quantizedData;
    const auto bucketCount = GPUModelData.FloatFeatureForBucketIdx.Size();
    quantizedData.SetDimensions(bucketCount, dataInput.ObjectCount);
    QuantizeData(dataInput, &quantizedData);
    EvalQuantizedData(&quantizedData, treeStart, treeEnd, result, predictionType);
}
|
baba3e731efcb1880744a60fe032f4bdd976f6cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/optimizers/adam_optimizer.hpp"
namespace {
// One Adam step per element: first/second moments m and v are updated in
// place, then the weight moves by -alpha_t * m / (sqrt(v) + epsilon), divided
// by the compile-time loss `scaler` (mixed-precision loss scaling).
__global__ void adam_kernel(int len, float* weight, const float* wgrad, float* m, float* v,
                            float alpha_t, float beta1, float beta2, float epsilon) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Loss-scaling factor chosen at compile time.
  int scaler = 1;
#ifdef SCALE_128
  scaler = 128;
#elif SCALE_256
  scaler = 256;
#elif SCALE_512
  scaler = 512;
#elif SCALE_1024
  scaler = 1024;
#else
  scaler = 1;
#endif
  if (idx >= len) {
    return;  // tail guard: grid may overshoot len
  }
  const float grad = wgrad[idx];
  const float mNew = beta1 * m[idx] + (1 - beta1) * grad;
  const float vNew = beta2 * v[idx] + (1 - beta2) * grad * grad;
  m[idx] = mNew;
  v[idx] = vNew;
  // Update is carried out in double, then narrowed on the -= store.
  weight[idx] -= (double)alpha_t * mNew / (sqrt(vNew) + epsilon) / scaler;
}
} // namespace
namespace HugeCTR {
// Applies one Adam update step to all weights on this optimizer's device,
// launching adam_kernel asynchronously on `stream`.
void AdamOptimizer::update(hipStream_t stream) {
  int old_device = -1;
  // Switch to the optimizer's device, remembering the caller's current one.
  CK_CUDA_THROW_(get_set_device(device_id_, &old_device));
  const int len = weight_.get_num_elements();
  const int block_dim = 256;
  const int grid_dim = (len - 1) / block_dim + 1;  // ceil(len / block_dim); len == 0 still yields 1 block (kernel guard handles it)
  float* weight = weight_.get_ptr_with_offset(0);
  const float* wgrad = wgrad_.get_ptr_with_offset(0);
  float* m = m_.get_ptr_with_offset(0);
  float* v = v_.get_ptr_with_offset(0);
  ++t_;  // Adam time step, used for bias correction below
  // Bias-corrected step size: lr * sqrt(1 - beta2^t) / (1 - beta1^t).
  const float alpha_t = lr_ * sqrt(1 - pow(beta2_, t_)) / (1 - pow(beta1_, t_));
  hipLaunchKernelGGL(( adam_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, len, weight, wgrad, m, v, alpha_t, beta1_, beta2_,
                 epsilon_);
#ifndef NDEBUG
  // Debug builds: synchronize so in-kernel faults surface at this call site.
  hipDeviceSynchronize();
  CK_CUDA_THROW_(hipGetLastError());
#endif
  // Restore the caller's device.
  CK_CUDA_THROW_(get_set_device(old_device));
}
} // namespace HugeCTR
| baba3e731efcb1880744a60fe032f4bdd976f6cb.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/optimizers/adam_optimizer.hpp"
namespace {
// One Adam step per element: first/second moments m and v are updated in
// place, then the weight moves by -alpha_t * m / (sqrt(v) + epsilon), divided
// by the compile-time loss `scaler` (mixed-precision loss scaling).
__global__ void adam_kernel(int len, float* weight, const float* wgrad, float* m, float* v,
                            float alpha_t, float beta1, float beta2, float epsilon) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Loss-scaling factor chosen at compile time.
  int scaler = 1;
#ifdef SCALE_128
  scaler = 128;
#elif SCALE_256
  scaler = 256;
#elif SCALE_512
  scaler = 512;
#elif SCALE_1024
  scaler = 1024;
#else
  scaler = 1;
#endif
  if (idx >= len) {
    return;  // tail guard: grid may overshoot len
  }
  const float grad = wgrad[idx];
  const float mNew = beta1 * m[idx] + (1 - beta1) * grad;
  const float vNew = beta2 * v[idx] + (1 - beta2) * grad * grad;
  m[idx] = mNew;
  v[idx] = vNew;
  // Update is carried out in double, then narrowed on the -= store.
  weight[idx] -= (double)alpha_t * mNew / (sqrt(vNew) + epsilon) / scaler;
}
} // namespace
namespace HugeCTR {
// Applies one Adam update step to all weights on this optimizer's device,
// launching adam_kernel asynchronously on `stream`.
void AdamOptimizer::update(cudaStream_t stream) {
  int old_device = -1;
  // Switch to the optimizer's device, remembering the caller's current one.
  CK_CUDA_THROW_(get_set_device(device_id_, &old_device));
  const int len = weight_.get_num_elements();
  const int block_dim = 256;
  const int grid_dim = (len - 1) / block_dim + 1;  // ceil(len / block_dim); len == 0 still yields 1 block (kernel guard handles it)
  float* weight = weight_.get_ptr_with_offset(0);
  const float* wgrad = wgrad_.get_ptr_with_offset(0);
  float* m = m_.get_ptr_with_offset(0);
  float* v = v_.get_ptr_with_offset(0);
  ++t_;  // Adam time step, used for bias correction below
  // Bias-corrected step size: lr * sqrt(1 - beta2^t) / (1 - beta1^t).
  const float alpha_t = lr_ * sqrt(1 - pow(beta2_, t_)) / (1 - pow(beta1_, t_));
  adam_kernel<<<grid_dim, block_dim, 0, stream>>>(len, weight, wgrad, m, v, alpha_t, beta1_, beta2_,
                                                  epsilon_);
#ifndef NDEBUG
  // Debug builds: synchronize so in-kernel faults surface at this call site.
  cudaDeviceSynchronize();
  CK_CUDA_THROW_(cudaGetLastError());
#endif
  // Restore the caller's device.
  CK_CUDA_THROW_(get_set_device(old_device));
}
} // namespace HugeCTR
|
7e93a81a5f2b3fc33b36f742a8893b9f196f1ee3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
// Computes per-filter variance of x over batch * spatial elements, given the
// precomputed per-filter means. One block per filter (blockIdx.x); BLOCK
// threads accumulate strided partial sums of squared deviations in shared
// memory, then thread 0 folds them and divides by N - 1 (unbiased estimator).
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            // powf keeps the squaring in single precision (pow would promote to double).
            local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
        }
    }

    // BUGFIX: this barrier was missing -- thread 0 read the other threads'
    // partial sums before they were guaranteed written (shared-memory race).
    __syncthreads();

    if(id == 0){
        variance[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance[filter] += local[i];
        }
        variance[filter] /= (spatial * batch - 1);
    }
} | 7e93a81a5f2b3fc33b36f742a8893b9f196f1ee3.cu | #include "includes.h"
extern "C" {
}
// Computes per-filter variance of x over batch * spatial elements, given the
// precomputed per-filter means. One block per filter (blockIdx.x); BLOCK
// threads accumulate strided partial sums of squared deviations in shared
// memory, then thread 0 folds them and divides by N - 1 (unbiased estimator).
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            // powf keeps the squaring in single precision (pow would promote to double).
            local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
        }
    }

    // BUGFIX: this barrier was missing -- thread 0 read the other threads'
    // partial sums before they were guaranteed written (shared-memory race).
    __syncthreads();

    if(id == 0){
        variance[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance[filter] += local[i];
        }
        variance[filter] /= (spatial * batch - 1);
    }
} |
bac9e96bfb6d94a3ec7b6c1481c3284e1cce24a9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ConvRbmTrainer_gpu.cu
*
* Created on: Mar 5, 2012
* Author: tombr
*/
#define BOOST_TYPEOF_COMPLIANT
#include "ConvRbmTrainer.h"
#include <iostream>
#include <sstream>
#include <capputils/Verifier.h>
#include <capputils/Logbook.h>
#include <boost/timer.hpp>
#include <tbblas/conv.hpp>
#include <tbblas/sum.hpp>
#include <tbblas/flip.hpp>
#include <tbblas/dot.hpp>
#include <thrust/inner_product.h>
//#include "sampling.hpp"
#include "RbmModel.h"
#include <hiprand/hiprand.h>
#include <culib/CulibException.h>
#include <culib/util.h>
#include <ctime>
namespace gapputils {
namespace ml {
#define LOCATE(a,b) std::cout << #b": " << (char*)&a._##b - (char*)&a << std::endl
// Probabilistic max-pooling activation for a CRBM hidden unit: returns
//   exp(value) / (1 + sum of exp(.) over the unit's blockSize x blockSize
//   pooling block),
// applied via thrust::transform over a row-major 2D layer of row length
// `width`; `idx` is the flat element index. The block sum is gathered by raw
// pointer offsets from &value, which relies on thrust handing the functor a
// reference into the contiguous device array -- valid for the plain
// device-vector transform used here, but fragile with fancy iterators.
template<class T>
struct softmax : thrust::binary_function<T, unsigned, T> {
  // FIX: initializer list reordered to match the declaration order of the
  // members below (members are always initialized in declaration order; the
  // old order triggered -Wreorder and misled readers).
  softmax(unsigned width, unsigned blockSize) : blockSize(blockSize), width(width) { }

  __host__ __device__
  T operator()(const T& value, const unsigned& idx) const {
    T res = 0;
    // Offset of this element inside its pooling block (column part + row part).
    const int offset = (idx % width) % blockSize + ((idx / width) % blockSize) * width;
    for (unsigned j = 0; j < blockSize; ++j)
      for (unsigned i = 0; i < blockSize; ++i)
        res += exp(*(&value + i + j * width - offset));
    return exp(value) / (1 + res);
  }

private:
  unsigned blockSize, width;
};
// Dumps current GPU memory statistics tagged with the given source line,
// then checks for pending CUDA/HIP errors.
void printMemoryAtLine(int line) {
  std::ostringstream tag;
  tag << "line " << line;
  culib::printMemoryStats(tag.str().c_str());
  CULIB_CHECK_ERROR();
}
#define TRACE printMemoryAtLine(__LINE__);
//#define TRACE
// Wall-clock stopwatch with one-second resolution, built on time().
class timer {
public:
  // Starts timing at construction.
  timer() : start(time(0)) { }

  // Seconds elapsed since construction or the last restart().
  time_t elapsed() const {
    return time(0) - start;
  }

  // Resets the reference point to "now".
  void restart() {
    start = time(0);
  }

private:
  time_t start;  // reference instant
};
// Trains a convolutional RBM with one-step contrastive divergence (CD-1) on
// the GPU: for each sample, a positive phase infers hidden probabilities per
// filter, a negative phase reconstructs the visibles and re-infers hiddens,
// and filter/bias increments are accumulated per batch with momentum, weight
// decay, and a sparsity penalty. Progress and debug filters are reported via
// `monitor` / `data`.
void ConvRbmTrainer::execute(gapputils::workflow::IProgressMonitor* monitor) const {
  using namespace thrust::placeholders;
  using capputils::Severity;
  capputils::Logbook& dlog = getLogbook();
  dlog.setSeverity(Severity::Message);
  ml::timer timer;
  if (!data)
    data = new ConvRbmTrainer();
//  std::cout << "Device:" << std::endl;
//  ConvRbmTrainer test;
//  LOCATE(test, InitialModel);
//  LOCATE(test, Tensors);
//  LOCATE(test, SampleVisibles);
//  LOCATE(test, EpochCount);
//  LOCATE(test, BatchSize);
//  LOCATE(test, LearningRate);
//  LOCATE(test, Model);
  // Validate module parameters and required inputs before touching the GPU.
  if (!capputils::Verifier::Valid(*this, dlog))
    return;
  if (!getInitialModel()) {
    dlog(Severity::Warning) << "No initial model given. Aborting!";
    return;
  }
  if (!getTensors() || getTensors()->size() == 0) {
    dlog(Severity::Warning) << "No training data given. Aborting!";
    return;
  }
  dlog() << "Building ConvRBM ...";
//  std::cout << "[Info] device size: " << sizeof(*this) << std::endl;
  hiprandGenerator_t gen;
  hiprandStatus_t status;
  if ((status = hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)) != HIPRAND_STATUS_SUCCESS) {
    dlog(Severity::Warning) << "Could not create random number generator: " << status;
    return;
  }
  const unsigned sampleCount = getTensors()->size();
  const int batchSize = getBatchSize();
  // Work on a clone so the initial model stays untouched.
  boost::shared_ptr<ConvRbmModel> crbm = getInitialModel()->clone();
  const unsigned dimCount = ConvRbmModel::dimCount;
  const unsigned filterCount = crbm->getFilters()->size();
  const unsigned blockSize = crbm->getPoolingBlockSize();
  const host_tensor_t::dim_t& filterDim = crbm->getFilters()->at(0)->size();
  const host_tensor_t::dim_t& inputDim = getTensors()->at(0)->size();
  // Derive hidden-layer and zero-padded dimensions from input/filter sizes.
  // NOTE(review): filterWeightCount is computed but never used afterwards.
  host_tensor_t::dim_t layerDim, paddedDim, start;
  int filterWeightCount = 1, layerVoxelCount = 1, inputVoxelCount = 1;
  for (unsigned i = 0; i < dimCount; ++i) {
    assert(inputDim[i] >= filterDim[i]);
    layerDim[i] = inputDim[i] - filterDim[i] + 1;   // valid convolution size
    paddedDim[i] = inputDim[i] + filterDim[i] - 1;  // full convolution size
    start[i] = filterDim[i] - 1;                    // offset of the layer inside the padded tensor
    filterWeightCount *= filterDim[i];
    layerVoxelCount *= layerDim[i];
    inputVoxelCount *= inputDim[i];
  }
  assert((layerDim[0] % blockSize) == 0);
  assert((layerDim[1] % blockSize) == 0);
  assert((layerVoxelCount % 2) == 0);   // TODO: loosen this constrain. Means, use temporary array to generate
  assert((inputVoxelCount % 2) == 0);   // random number (count must be a multiple of 2)
  // Train the RBM: deep-copy the training tensors so normalization below
  // does not modify the module's input data.
  std::vector<boost::shared_ptr<host_tensor_t> >& tensors = *getTensors();
  std::vector<boost::shared_ptr<host_tensor_t> > X;
  for (unsigned i = 0; i < tensors.size(); ++i) {
    X.push_back(boost::shared_ptr<host_tensor_t>(new host_tensor_t(*tensors[i])));
  }
  if (crbm->getIsGaussian()) {
    // Calculate the mean and normalize the data (Gaussian visible units
    // expect zero-mean, unit-variance inputs).
    value_t mean = crbm->getMean();
    value_t stddev = crbm->getStddev();
    dlog() << "Mean and sd = " << mean << ", " << stddev;
    for (unsigned i = 0; i < X.size(); ++i)
      *X[i] = *X[i] - mean;
    for (unsigned i = 0; i < X.size(); ++i) {
      *X[i] = *X[i] / stddev;
    }
  }
  // Copy filters to the device
  std::vector<boost::shared_ptr<host_tensor_t> >& filters = *crbm->getFilters();
  std::vector<device_tensor_t > F;
  for (unsigned i = 0; i < filters.size(); ++i) {
    device_tensor_t filter(filters[i]->size());
    thrust::copy(filters[i]->begin(), filters[i]->end(), filter.begin());
    F.push_back(filter);
  }
  value_t b = crbm->getVisibleBias();
  std::vector<value_t>& c = *crbm->getHiddenBiases();  // updated in place on the clone
  dlog() << "ConvRBM initialized: " << timer.elapsed() << " s";
  // Start the learning
  const int batchCount = sampleCount / batchSize;
  value_t epsilonw = getLearningRate();      // Learning rate for weights
  value_t epsilonvb = getLearningRate();     // Learning rate for biases of visible units
  value_t epsilonhb = getLearningRate();     // Learning rate for biases of hidden units
  value_t weightcost = 0.0002;
  value_t initialmomentum = 0.5; //65; // 0.5f;
  value_t finalmomentum = 0.9; // 65; // 0.9f;
  // NOTE(review): momentum is only assigned inside the sample loop below; if
  // batchSize were 0 it would be read uninitialized in the batch update.
  value_t momentum;
  culib::printMemoryStats("ConvRbmTrainer initialized");
  // Working tensors: current visible, reconstruction, scratch, and a
  // zero-padded buffer for the full convolution in the negative phase.
  device_tensor_t v(inputDim), vneg(inputDim), vtemp(inputDim), padded(paddedDim);
  thrust::fill(padded.begin(), padded.end(), value_t(0));
  // Per-filter working buffers: hidden probabilities/states for both phases,
  // positive/negative statistics, and momentum/batch increments.
  std::vector<device_tensor_t> poshidprobs, poshidstates, posvishid, neghidprobs, neghidstates, negvishid, Finc, Fincbatch;
  dlog(Severity::Trace) << "layer dim = " << layerDim[0] << ", " << layerDim[1] << ", " << layerDim[2];
  for (unsigned i = 0; i < filterCount; ++i) {
    device_tensor_t tens = device_tensor_t(layerDim);
    poshidprobs.push_back(tens);
    tens = device_tensor_t(layerDim);
    poshidstates.push_back(tens);
    tens = device_tensor_t(filterDim);
    posvishid.push_back(tens);
    neghidprobs.push_back(device_tensor_t(layerDim));
    neghidstates.push_back(device_tensor_t(layerDim));
    negvishid.push_back(device_tensor_t(filterDim));
    Finc.push_back(device_tensor_t(filterDim));
    thrust::fill(Finc[i].begin(), Finc[i].end(), value_t(0));
    Fincbatch.push_back(device_tensor_t(filterDim));
  }
  // Scalar accumulators for visible/hidden bias updates and sparsity terms.
  // NOTE(review): cspa is initialized but never used afterwards (dead).
  value_t posvisact, negvisact, binc = 0, bincbatch;
  std::vector<value_t> poshidact(filterCount), neghidact(filterCount),
      cinc(filterCount, 0), cincbatch(filterCount, 0),
      cspa(filterCount, 0), cspabatch(filterCount, 0);
  const int epochCount = getEpochCount();
  dlog() << "Preparation finished after " << timer.elapsed() << " s";
  CULIB_CHECK_ERROR();
  culib::printMemoryStats("ConvRbmTrainer memory allocated");
  dlog() << "Starting training";
  timer.restart();
  // Publish the initial filters for live visualization if requested.
  if (epochCount && getShowProgress()) {
    boost::shared_ptr<std::vector<boost::shared_ptr<host_tensor_t> > > debugFilters(
        new std::vector<boost::shared_ptr<host_tensor_t> >());
    for (unsigned i = 0; i < filterCount; ++i)
      debugFilters->push_back(boost::shared_ptr<host_tensor_t> (new host_tensor_t(F[i])));
    data->setFilters(debugFilters);
  }
  if (monitor)
    monitor->reportProgress(0, getShowProgress());
  for (int iEpoch = 0; iEpoch < epochCount && (monitor ? !monitor->getAbortRequested() : true); ++iEpoch) {
    double error = 0;  // accumulated reconstruction error for this epoch
    for (int iBatch = 0; iBatch < batchCount && (monitor ? !monitor->getAbortRequested() : true); ++iBatch) {
      // Reset the per-batch accumulators.
      for (unsigned k = 0; k < filterCount; ++k) {
        thrust::fill(Fincbatch[k].begin(), Fincbatch[k].end(), value_t(0));
        cincbatch[k] = 0;
        cspabatch[k] = 0;
      }
      bincbatch = 0;
      for (int iSample = 0; iSample < batchSize && (monitor ? !monitor->getAbortRequested() : true); ++iSample) {
        /*** START POSITIVE PHASE ***/
        const int randomSample = rand() % sampleCount;
        // Get current sample
        if (getUseRandomSamples())
          thrust::copy(X[randomSample]->begin(), X[randomSample]->end(), v.begin());
        else
          thrust::copy(X[iSample + iBatch * batchSize]->begin(), X[iSample + iBatch * batchSize]->end(), v.begin());
        // For each filter (Could be written as a single 4D convolution in case of a 2D image and 3D filter))
        for (unsigned k = 0; k < filterCount && (monitor ? !monitor->getAbortRequested() : true); ++k) {
          // Calculate p(h_k | v, F) = sigm((~F_k * v) + c_k)
          poshidstates[k] = tbblas::conv(tbblas::flip(F[k]), v);
          poshidstates[k] = poshidstates[k]+ c[k];   // x = ~F_k * v + c_k
          // Probabilistic max-pooling activation over blockSize x blockSize blocks.
          // I'm using the state array here for the sum. Not nice but works fine and saves some space
          thrust::transform(poshidstates[k].data().begin(), poshidstates[k].data().end(),
              thrust::make_counting_iterator(0), poshidprobs[k].data().begin(),
              softmax<value_t>(layerDim[0], blockSize));
//          thrust::transform(poshidstates[k].data().begin(), poshidstates[k].data().end(),      // x = sigm(x)
//              poshidprobs[k].data().begin(), sigmoid<value_t>());
          // Calculate energy and the total activation of the hidden units
          posvishid[k] = tbblas::conv(tbblas::flip(poshidprobs[k]), v);   // ~h_k * v
          poshidact[k] = tbblas::sum(poshidprobs[k]);
          // Sparsity term: target minus mean hidden activation (skipped on a
          // baseline-measuring first epoch).
          if (iEpoch || !getCalculateBaseline())
            cspabatch[k] = cspabatch[k] + getSparsityTarget() - tbblas::sum(poshidprobs[k]) / poshidprobs[k].data().size();
          // fill states with random numbers which are then used to sample the units
          // TODO: use hiprandGenerateUniform if value_t == float
          // NOTE(review): the early returns on RNG failure below leak `gen`
          // (no hiprandDestroyGenerator on these paths).
          if ((status = hiprandGenerateUniformDouble(gen,
              poshidstates[k].data().data().get(),
              poshidstates[k].data().size())) != HIPRAND_STATUS_SUCCESS)
          {
            dlog(Severity::Error) << "Could not generate random numbers: " << status;
            return;
          }
          // Sample the hidden states
          // TODO: sample correctly from the categorical.
          thrust::transform(
              poshidprobs[k].data().begin(), poshidprobs[k].data().end(), poshidstates[k].data().begin(),
              poshidstates[k].data().begin(), _1 > _2
          );
        }
        // Calculate the total activation of the visible units
        posvisact = tbblas::sum(v);
        /*** END OF POSITIVE PHASE ***/
        /*** START NEGATIVE PHASE ***/
        // Calculate p(v | H, F) = sigm(sum(W_k * h_k) + b)
        thrust::fill(vneg.data().begin(), vneg.data().end(), value_t(0));
        for (unsigned k = 0; k < filterCount; ++k) {
//          device_proxy_t paddedProxy = tbblas::subrange(padded, start, layerDim);
//          thrust::copy(poshidstates[k].begin(), poshidstates[k].end(), paddedProxy.begin());
          // NOTE(review): `padded[start, layerDim]` presumably resolves to a
          // tbblas subrange proxy via overloaded operators; with plain C++
          // the comma operator would discard `start` -- confirm against the
          // tbblas operator[] definition (cf. the commented subrange above).
          padded[start, layerDim] = poshidstates[k];
          vtemp = tbblas::conv(F[k], padded);
          vneg = vneg + vtemp;
        }
        vneg = vneg + b;
        // For the binary case
        if (!crbm->getIsGaussian()) {
          thrust::transform(vneg.begin(), vneg.end(), vneg.begin(),
              sigmoid<value_t>());
          if (getSampleVisibles()) {
            // Bernoulli-sample the reconstruction.
            if ((status = hiprandGenerateUniformDouble(gen, vtemp.data().data().get(), vtemp.data().size())) != HIPRAND_STATUS_SUCCESS)
            {
              dlog(Severity::Error) << "Could not generate random numbers: " << status;
              return;
            }
            thrust::transform(
                vneg.data().begin(), vneg.data().end(), vtemp.data().begin(),
                vneg.data().begin(), _1 > _2
            );
          }
        } else {
          if (getSampleVisibles()) {
            // Gaussian visibles: add unit-variance noise to the mean field.
            if ((status = hiprandGenerateNormalDouble(gen,
                vtemp.data().data().get(),
                vtemp.data().size(),
                0, 1.0)) != HIPRAND_STATUS_SUCCESS)
            {
              dlog(Severity::Error) << "Could not generate random numbers: " << status;
              return;
            }
            thrust::transform(
                vneg.data().begin(), vneg.data().end(), vtemp.data().begin(),
                vneg.data().begin(), thrust::plus<value_t>()
            );
          }
        }
        for (unsigned k = 0; k < filterCount; ++k) {
          // Calculate p(h_k | vneg, F) = sigm((~F_k * v) + c_k)
          neghidstates[k] = tbblas::conv(tbblas::flip(F[k]), vneg);   // x = ~F_k * v + c_k
          neghidstates[k] = neghidstates[k] + c[k];
          thrust::transform(neghidstates[k].data().begin(), neghidstates[k].data().end(),
              thrust::make_counting_iterator(0), neghidprobs[k].data().begin(),
              softmax<value_t>(layerDim[0], blockSize));
//          thrust::transform(neghidstates[k].data().begin(), neghidstates[k].data().end(),      // x = sigm(x)
//              neghidprobs[k].data().begin(), sigmoid<value_t>());
          // Calculate energy and the total activation of the hidden units
          negvishid[k] = tbblas::conv(tbblas::flip(neghidprobs[k]), vneg);   // ~h_k * v
          neghidact[k] = tbblas::sum(neghidprobs[k]);
        }
        // Calculate the total activation of the visible units
        negvisact = tbblas::sum(vneg);
        /*** END OF NEGATIVE PHASE ***/
        // Squared reconstruction error for monitoring only.
        double curerr = thrust::inner_product(vneg.begin(), vneg.end(), v.begin(), value_t(0),
            thrust::plus<value_t>(), (_1 - _2) * (_1 - _2));
        error += curerr;
        momentum = (iEpoch > 5 ? finalmomentum : initialmomentum);
        /*** UPDATE WEIGHTS AND BIASES ***/
        // CD-1 statistics: positive minus negative phase (skipped on a
        // baseline-measuring first epoch).
        if (iEpoch || !getCalculateBaseline()) {
          for (unsigned k = 0; k < filterCount; ++k) {
            Fincbatch[k] = Fincbatch[k] + (posvishid[k] = posvishid[k] - negvishid[k]);
            cincbatch[k] += (poshidact[k] - neghidact[k]);
          }
          bincbatch = posvisact - negvisact;
        }
      }
      // Apply the batch increments with momentum, weight decay, and sparsity.
      for (unsigned k = 0; k < filterCount; ++k) {
        Finc[k] = momentum * Finc[k] + (epsilonw / batchSize / layerVoxelCount) * Fincbatch[k];
        Finc[k] = Finc[k] + (-epsilonw * weightcost) * F[k];
        cinc[k] = momentum * cinc[k] + (epsilonhb / batchSize / layerVoxelCount) * cincbatch[k]
            + getSparsityPenalty() * cspabatch[k] / batchSize;
        F[k] = F[k] + Finc[k];
        c[k] = c[k] + cinc[k];
      }
      binc = momentum * binc + (epsilonvb / batchSize / inputVoxelCount) * bincbatch;
      b += binc;
      /*** END OF UPDATES ***/
      if (monitor)
        monitor->reportProgress(100. * (iEpoch * batchCount + (iBatch + 1)) / (epochCount * batchCount));
    }
    // Log reconstruction error and a linear-extrapolation ETA.
    int eta = (int)(timer.elapsed() / (double)(iEpoch + 1) * (double)(epochCount - iEpoch - 1));
    int sec = eta % 60;
    int minutes = (eta / 60) % 60;
    int hours = eta / 3600;
    dlog(Severity::Trace) << "Epoch " << iEpoch << " error " << (error / sampleCount) << " after " << timer.elapsed() << "s. ETA: "
        << hours << " h " << minutes << " min " << sec << " s";
    if (getShowProgress()){
      boost::shared_ptr<std::vector<boost::shared_ptr<host_tensor_t> > > debugFilters(
          new std::vector<boost::shared_ptr<host_tensor_t> >());
      for (unsigned i = 0; i < filterCount; ++i)
        debugFilters->push_back(boost::shared_ptr<host_tensor_t> (new host_tensor_t(F[i])));
      data->setFilters(debugFilters);
    }
    // Optional wall-clock deadline for the whole training run.
    if (getMaxTime() > 0 && timer.elapsed() > getMaxTime()) {
      dlog(Severity::Warning) << "Deadline for the training reached. Training stopped.";
      break;
    }
    if (monitor)
      monitor->reportProgress(100. * (iEpoch + 1) / epochCount, (iEpoch < epochCount - 1) && getShowProgress());
  }
  if ((status = hiprandDestroyGenerator(gen)) != HIPRAND_STATUS_SUCCESS)
  {
    dlog(Severity::Error) << "Could not destroy random number generator: " << status;
    return;
  }
  // Publish the final filters for visualization.
  {
    boost::shared_ptr<std::vector<boost::shared_ptr<host_tensor_t> > > debugFilters(
        new std::vector<boost::shared_ptr<host_tensor_t> >());
    for (unsigned i = 0; i < filterCount; ++i) {
      debugFilters->push_back(boost::shared_ptr<host_tensor_t> (new host_tensor_t(F[i])));
//      dlog(Severity::Message) << "Filter " << i + 1 << ": "
//          << tbblas::dot(*debugFilters->at(i), *debugFilters->at(i));
    }
    data->setFilters(debugFilters);
  }
  // Copy the trained filters back into the cloned model and publish it.
  for (unsigned i = 0; i < filterCount; ++i) {
    thrust::copy(F[i].begin(), F[i].end(), filters[i]->begin());
//    std::cout << "Filter " << i + 1 << ": " << tbblas::dot(F[i], F[i]) << std::endl;
  }
  crbm->setVisibleBias(b);
  dlog() << "VisibleBias: " << b << " and " << crbm->getVisibleBias();
  data->setModel(crbm);
}
}
}
| bac9e96bfb6d94a3ec7b6c1481c3284e1cce24a9.cu | /*
* ConvRbmTrainer_gpu.cu
*
* Created on: Mar 5, 2012
* Author: tombr
*/
#define BOOST_TYPEOF_COMPLIANT
#include "ConvRbmTrainer.h"
#include <iostream>
#include <sstream>
#include <capputils/Verifier.h>
#include <capputils/Logbook.h>
#include <boost/timer.hpp>
#include <tbblas/conv.hpp>
#include <tbblas/sum.hpp>
#include <tbblas/flip.hpp>
#include <tbblas/dot.hpp>
#include <thrust/inner_product.h>
//#include "sampling.hpp"
#include "RbmModel.h"
#include <curand.h>
#include <culib/CulibException.h>
#include <culib/util.h>
#include <ctime>
namespace gapputils {
namespace ml {
#define LOCATE(a,b) std::cout << #b": " << (char*)&a._##b - (char*)&a << std::endl
template<class T>
struct softmax : thrust::binary_function<T, unsigned, T> {
softmax(unsigned width, unsigned blockSize) : width(width), blockSize(blockSize) { }
__host__ __device__
T operator()(const T& value, const unsigned& idx) const {
T res = 0;
const int offset = (idx % width) % blockSize + ((idx / width) % blockSize) * width;
for (unsigned j = 0; j < blockSize; ++j)
for (unsigned i = 0; i < blockSize; ++i)
res += exp(*(&value + i + j * width - offset));
return exp(value) / (1 + res);
}
private:
unsigned blockSize, width;
};
// Dumps current GPU memory statistics tagged with the given source line,
// then checks for pending CUDA errors.
void printMemoryAtLine(int line) {
  std::ostringstream tag;
  tag << "line " << line;
  culib::printMemoryStats(tag.str().c_str());
  CULIB_CHECK_ERROR();
}
#define TRACE printMemoryAtLine(__LINE__);
//#define TRACE
// Wall-clock stopwatch with one-second resolution, built on time().
class timer {
public:
  // Starts timing at construction.
  timer() : start(time(0)) { }

  // Seconds elapsed since construction or the last restart().
  time_t elapsed() const {
    return time(0) - start;
  }

  // Resets the reference point to "now".
  void restart() {
    start = time(0);
  }

private:
  time_t start;  // reference instant
};
// Trains a convolutional RBM with contrastive divergence (CD-1) on the GPU.
// Reads the initial model and training tensors from the module's properties,
// runs getEpochCount() epochs of batched CD updates (positive phase, Gibbs
// reconstruction, negative phase, momentum-based parameter updates), and
// publishes the learned filters and model through `data`.
// Progress and abort requests are routed through `monitor` (may be null).
void ConvRbmTrainer::execute(gapputils::workflow::IProgressMonitor* monitor) const {
  using namespace thrust::placeholders;
  using capputils::Severity;
  capputils::Logbook& dlog = getLogbook();
  dlog.setSeverity(Severity::Message);
  ml::timer timer;
  // Lazily create the output-data holder used to publish results.
  if (!data)
    data = new ConvRbmTrainer();
  // std::cout << "Device:" << std::endl;
  // ConvRbmTrainer test;
  // LOCATE(test, InitialModel);
  // LOCATE(test, Tensors);
  // LOCATE(test, SampleVisibles);
  // LOCATE(test, EpochCount);
  // LOCATE(test, BatchSize);
  // LOCATE(test, LearningRate);
  // LOCATE(test, Model);
  // Validate module parameters before touching any GPU resources.
  if (!capputils::Verifier::Valid(*this, dlog))
    return;
  if (!getInitialModel()) {
    dlog(Severity::Warning) << "No initial model given. Aborting!";
    return;
  }
  if (!getTensors() || getTensors()->size() == 0) {
    dlog(Severity::Warning) << "No training data given. Aborting!";
    return;
  }
  dlog() << "Building ConvRBM ...";
  // std::cout << "[Info] device size: " << sizeof(*this) << std::endl;
  // cuRAND generator used for stochastic sampling of hidden/visible units.
  curandGenerator_t gen;
  curandStatus_t status;
  if ((status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)) != CURAND_STATUS_SUCCESS) {
    dlog(Severity::Warning) << "Could not create random number generator: " << status;
    return;
  }
  const unsigned sampleCount = getTensors()->size();
  const int batchSize = getBatchSize();
  // Work on a clone so the initial model stays untouched.
  boost::shared_ptr<ConvRbmModel> crbm = getInitialModel()->clone();
  const unsigned dimCount = ConvRbmModel::dimCount;
  const unsigned filterCount = crbm->getFilters()->size();
  const unsigned blockSize = crbm->getPoolingBlockSize();
  const host_tensor_t::dim_t& filterDim = crbm->getFilters()->at(0)->size();
  const host_tensor_t::dim_t& inputDim = getTensors()->at(0)->size();
  // layerDim: size of a hidden layer ("valid" convolution output);
  // paddedDim: size of the zero-padded buffer for "full" convolution;
  // start: offset of the valid region inside the padded buffer.
  host_tensor_t::dim_t layerDim, paddedDim, start;
  int filterWeightCount = 1, layerVoxelCount = 1, inputVoxelCount = 1;
  for (unsigned i = 0; i < dimCount; ++i) {
    assert(inputDim[i] >= filterDim[i]);
    layerDim[i] = inputDim[i] - filterDim[i] + 1;
    paddedDim[i] = inputDim[i] + filterDim[i] - 1;
    start[i] = filterDim[i] - 1;
    filterWeightCount *= filterDim[i];
    layerVoxelCount *= layerDim[i];
    inputVoxelCount *= inputDim[i];
  }
  // Hidden layer must tile evenly into pooling blocks.
  assert((layerDim[0] % blockSize) == 0);
  assert((layerDim[1] % blockSize) == 0);
  assert((layerVoxelCount % 2) == 0); // TODO: loosen this constraint. Means, use temporary array to generate
  assert((inputVoxelCount % 2) == 0); // random numbers (count must be a multiple of 2)
  // Copy the training data so normalization below does not modify the inputs.
  std::vector<boost::shared_ptr<host_tensor_t> >& tensors = *getTensors();
  std::vector<boost::shared_ptr<host_tensor_t> > X;
  for (unsigned i = 0; i < tensors.size(); ++i) {
    X.push_back(boost::shared_ptr<host_tensor_t>(new host_tensor_t(*tensors[i])));
  }
  if (crbm->getIsGaussian()) {
    // Gaussian visible units: standardize the data with the model's mean/sd.
    value_t mean = crbm->getMean();
    value_t stddev = crbm->getStddev();
    dlog() << "Mean and sd = " << mean << ", " << stddev;
    for (unsigned i = 0; i < X.size(); ++i)
      *X[i] = *X[i] - mean;
    for (unsigned i = 0; i < X.size(); ++i) {
      *X[i] = *X[i] / stddev;
    }
  }
  // Copy filters to the device
  std::vector<boost::shared_ptr<host_tensor_t> >& filters = *crbm->getFilters();
  std::vector<device_tensor_t > F;
  for (unsigned i = 0; i < filters.size(); ++i) {
    device_tensor_t filter(filters[i]->size());
    thrust::copy(filters[i]->begin(), filters[i]->end(), filter.begin());
    F.push_back(filter);
  }
  // b: visible bias (scalar); c: one hidden bias per filter.
  value_t b = crbm->getVisibleBias();
  std::vector<value_t>& c = *crbm->getHiddenBiases();
  dlog() << "ConvRBM initialized: " << timer.elapsed() << " s";
  // Start the learning
  // NOTE(review): integer division — samples beyond batchCount*batchSize are
  // never visited when getUseRandomSamples() is false; confirm intended.
  const int batchCount = sampleCount / batchSize;
  value_t epsilonw = getLearningRate();   // Learning rate for weights
  value_t epsilonvb = getLearningRate();  // Learning rate for biases of visible units
  value_t epsilonhb = getLearningRate();  // Learning rate for biases of hidden units
  value_t weightcost = 0.0002;            // L2 weight decay coefficient
  value_t initialmomentum = 0.5; //65; // 0.5f;
  value_t finalmomentum = 0.9; // 65; // 0.9f;
  // NOTE(review): `momentum` is only assigned inside the sample loop (below);
  // if that loop runs zero iterations it is read uninitialized at the batch
  // update — confirm batchSize > 0 is guaranteed by the verifier.
  value_t momentum;
  culib::printMemoryStats("ConvRbmTrainer initialized");
  // v: current sample; vneg: reconstruction; vtemp: scratch / random buffer;
  // padded: zero-padded hidden states for the "full" convolution.
  device_tensor_t v(inputDim), vneg(inputDim), vtemp(inputDim), padded(paddedDim);
  thrust::fill(padded.begin(), padded.end(), value_t(0));
  // Per-filter working buffers (positive/negative statistics and increments).
  std::vector<device_tensor_t> poshidprobs, poshidstates, posvishid, neghidprobs, neghidstates, negvishid, Finc, Fincbatch;
  dlog(Severity::Trace) << "layer dim = " << layerDim[0] << ", " << layerDim[1] << ", " << layerDim[2];
  for (unsigned i = 0; i < filterCount; ++i) {
    device_tensor_t tens = device_tensor_t(layerDim);
    poshidprobs.push_back(tens);
    tens = device_tensor_t(layerDim);
    poshidstates.push_back(tens);
    tens = device_tensor_t(filterDim);
    posvishid.push_back(tens);
    neghidprobs.push_back(device_tensor_t(layerDim));
    neghidstates.push_back(device_tensor_t(layerDim));
    negvishid.push_back(device_tensor_t(filterDim));
    Finc.push_back(device_tensor_t(filterDim));
    thrust::fill(Finc[i].begin(), Finc[i].end(), value_t(0));
    Fincbatch.push_back(device_tensor_t(filterDim));
  }
  // Scalar accumulators: visible-bias increment and per-filter hidden-bias /
  // sparsity accumulators.
  value_t posvisact, negvisact, binc = 0, bincbatch;
  std::vector<value_t> poshidact(filterCount), neghidact(filterCount),
      cinc(filterCount, 0), cincbatch(filterCount, 0),
      cspa(filterCount, 0), cspabatch(filterCount, 0);
  const int epochCount = getEpochCount();
  dlog() << "Preparation finished after " << timer.elapsed() << " s";
  CULIB_CHECK_ERROR();
  culib::printMemoryStats("ConvRbmTrainer memory allocated");
  dlog() << "Starting training";
  timer.restart();
  // Publish the initial filters once so progress visualization has a baseline.
  if (epochCount && getShowProgress()) {
    boost::shared_ptr<std::vector<boost::shared_ptr<host_tensor_t> > > debugFilters(
        new std::vector<boost::shared_ptr<host_tensor_t> >());
    for (unsigned i = 0; i < filterCount; ++i)
      debugFilters->push_back(boost::shared_ptr<host_tensor_t> (new host_tensor_t(F[i])));
    data->setFilters(debugFilters);
  }
  if (monitor)
    monitor->reportProgress(0, getShowProgress());
  for (int iEpoch = 0; iEpoch < epochCount && (monitor ? !monitor->getAbortRequested() : true); ++iEpoch) {
    double error = 0;
    for (int iBatch = 0; iBatch < batchCount && (monitor ? !monitor->getAbortRequested() : true); ++iBatch) {
      // Reset per-batch accumulators.
      for (unsigned k = 0; k < filterCount; ++k) {
        thrust::fill(Fincbatch[k].begin(), Fincbatch[k].end(), value_t(0));
        cincbatch[k] = 0;
        cspabatch[k] = 0;
      }
      bincbatch = 0;
      for (int iSample = 0; iSample < batchSize && (monitor ? !monitor->getAbortRequested() : true); ++iSample) {
        /*** START POSITIVE PHASE ***/
        const int randomSample = rand() % sampleCount;
        // Get current sample
        if (getUseRandomSamples())
          thrust::copy(X[randomSample]->begin(), X[randomSample]->end(), v.begin());
        else
          thrust::copy(X[iSample + iBatch * batchSize]->begin(), X[iSample + iBatch * batchSize]->end(), v.begin());
        // For each filter (Could be written as a single 4D convolution in case of a 2D image and 3D filter))
        for (unsigned k = 0; k < filterCount && (monitor ? !monitor->getAbortRequested() : true); ++k) {
          // Calculate p(h_k | v, F) = sigm((~F_k * v) + c_k)
          poshidstates[k] = tbblas::conv(tbblas::flip(F[k]), v);
          poshidstates[k] = poshidstates[k]+ c[k]; // x = ~F_k * v + c_k
          // I'm using the state array here for the sum. Not nice but works fine and saves some space
          thrust::transform(poshidstates[k].data().begin(), poshidstates[k].data().end(),
              thrust::make_counting_iterator(0), poshidprobs[k].data().begin(),
              softmax<value_t>(layerDim[0], blockSize));
          // thrust::transform(poshidstates[k].data().begin(), poshidstates[k].data().end(), // x = sigm(x)
          //     poshidprobs[k].data().begin(), sigmoid<value_t>());
          // Calculate energy and the total activation of the hidden units
          posvishid[k] = tbblas::conv(tbblas::flip(poshidprobs[k]), v); // ~h_k * v
          poshidact[k] = tbblas::sum(poshidprobs[k]);
          // Sparsity statistic (skipped in epoch 0 when measuring a baseline).
          if (iEpoch || !getCalculateBaseline())
            cspabatch[k] = cspabatch[k] + getSparsityTarget() - tbblas::sum(poshidprobs[k]) / poshidprobs[k].data().size();
          // fill states with random numbers which are then used to sample the units
          // TODO: use curandGenerateUniform if value_t == float
          if ((status = curandGenerateUniformDouble(gen,
              poshidstates[k].data().data().get(),
              poshidstates[k].data().size())) != CURAND_STATUS_SUCCESS)
          {
            dlog(Severity::Error) << "Could not generate random numbers: " << status;
            return;
          }
          // Sample the hidden states
          // TODO: sample correctly from the categorical.
          thrust::transform(
              poshidprobs[k].data().begin(), poshidprobs[k].data().end(), poshidstates[k].data().begin(),
              poshidstates[k].data().begin(), _1 > _2
          );
        }
        // Calculate the total activation of the visible units
        posvisact = tbblas::sum(v);
        /*** END OF POSITIVE PHASE ***/
        /*** START NEGATIVE PHASE ***/
        // Calculate p(v | H, F) = sigm(sum(W_k * h_k) + b)
        thrust::fill(vneg.data().begin(), vneg.data().end(), value_t(0));
        for (unsigned k = 0; k < filterCount; ++k) {
          // device_proxy_t paddedProxy = tbblas::subrange(padded, start, layerDim);
          // thrust::copy(poshidstates[k].begin(), poshidstates[k].end(), paddedProxy.begin());
          // NOTE(review): unless tbblas overloads operator, for dim_t, the
          // comma expression below collapses to padded[layerDim] rather than
          // the subrange assignment sketched in the commented lines above —
          // confirm against the tbblas operator[] / operator, definitions.
          padded[start, layerDim] = poshidstates[k];
          vtemp = tbblas::conv(F[k], padded);
          vneg = vneg + vtemp;
        }
        vneg = vneg + b;
        // For the binary case
        if (!crbm->getIsGaussian()) {
          thrust::transform(vneg.begin(), vneg.end(), vneg.begin(),
              sigmoid<value_t>());
          if (getSampleVisibles()) {
            // Bernoulli-sample the visibles: vneg := (p > uniform).
            if ((status = curandGenerateUniformDouble(gen, vtemp.data().data().get(), vtemp.data().size())) != CURAND_STATUS_SUCCESS)
            {
              dlog(Severity::Error) << "Could not generate random numbers: " << status;
              return;
            }
            thrust::transform(
                vneg.data().begin(), vneg.data().end(), vtemp.data().begin(),
                vneg.data().begin(), _1 > _2
            );
          }
        } else {
          if (getSampleVisibles()) {
            // Gaussian visibles: add unit-variance noise to the mean activation.
            if ((status = curandGenerateNormalDouble(gen,
                vtemp.data().data().get(),
                vtemp.data().size(),
                0, 1.0)) != CURAND_STATUS_SUCCESS)
            {
              dlog(Severity::Error) << "Could not generate random numbers: " << status;
              return;
            }
            thrust::transform(
                vneg.data().begin(), vneg.data().end(), vtemp.data().begin(),
                vneg.data().begin(), thrust::plus<value_t>()
            );
          }
        }
        for (unsigned k = 0; k < filterCount; ++k) {
          // Calculate p(h_k | vneg, F) = sigm((~F_k * v) + c_k)
          neghidstates[k] = tbblas::conv(tbblas::flip(F[k]), vneg); // x = ~F_k * v + c_k
          neghidstates[k] = neghidstates[k] + c[k];
          thrust::transform(neghidstates[k].data().begin(), neghidstates[k].data().end(),
              thrust::make_counting_iterator(0), neghidprobs[k].data().begin(),
              softmax<value_t>(layerDim[0], blockSize));
          // thrust::transform(neghidstates[k].data().begin(), neghidstates[k].data().end(), // x = sigm(x)
          //     neghidprobs[k].data().begin(), sigmoid<value_t>());
          // Calculate energy and the total activation of the hidden units
          negvishid[k] = tbblas::conv(tbblas::flip(neghidprobs[k]), vneg); // ~h_k * v
          neghidact[k] = tbblas::sum(neghidprobs[k]);
        }
        // Calculate the total activation of the visible units
        negvisact = tbblas::sum(vneg);
        /*** END OF NEGATIVE PHASE ***/
        // Reconstruction error for this sample: sum((vneg - v)^2).
        double curerr = thrust::inner_product(vneg.begin(), vneg.end(), v.begin(), value_t(0),
            thrust::plus<value_t>(), (_1 - _2) * (_1 - _2));
        error += curerr;
        momentum = (iEpoch > 5 ? finalmomentum : initialmomentum);
        /*** UPDATE WEIGHTS AND BIASES ***/
        if (iEpoch || !getCalculateBaseline()) {
          for (unsigned k = 0; k < filterCount; ++k) {
            Fincbatch[k] = Fincbatch[k] + (posvishid[k] = posvishid[k] - negvishid[k]);
            cincbatch[k] += (poshidact[k] - neghidact[k]);
          }
          bincbatch = posvisact - negvisact;
        }
      }
      // Apply the batch-accumulated CD gradients with momentum, weight decay
      // and the sparsity penalty.
      for (unsigned k = 0; k < filterCount; ++k) {
        Finc[k] = momentum * Finc[k] + (epsilonw / batchSize / layerVoxelCount) * Fincbatch[k];
        Finc[k] = Finc[k] + (-epsilonw * weightcost) * F[k];
        cinc[k] = momentum * cinc[k] + (epsilonhb / batchSize / layerVoxelCount) * cincbatch[k]
            + getSparsityPenalty() * cspabatch[k] / batchSize;
        F[k] = F[k] + Finc[k];
        c[k] = c[k] + cinc[k];
      }
      binc = momentum * binc + (epsilonvb / batchSize / inputVoxelCount) * bincbatch;
      b += binc;
      /*** END OF UPDATES ***/
      if (monitor)
        monitor->reportProgress(100. * (iEpoch * batchCount + (iBatch + 1)) / (epochCount * batchCount));
    }
    // Estimate remaining time from the average epoch duration so far.
    int eta = (int)(timer.elapsed() / (double)(iEpoch + 1) * (double)(epochCount - iEpoch - 1));
    int sec = eta % 60;
    int minutes = (eta / 60) % 60;
    int hours = eta / 3600;
    dlog(Severity::Trace) << "Epoch " << iEpoch << " error " << (error / sampleCount) << " after " << timer.elapsed() << "s. ETA: "
        << hours << " h " << minutes << " min " << sec << " s";
    // Publish intermediate filters for live visualization.
    if (getShowProgress()){
      boost::shared_ptr<std::vector<boost::shared_ptr<host_tensor_t> > > debugFilters(
          new std::vector<boost::shared_ptr<host_tensor_t> >());
      for (unsigned i = 0; i < filterCount; ++i)
        debugFilters->push_back(boost::shared_ptr<host_tensor_t> (new host_tensor_t(F[i])));
      data->setFilters(debugFilters);
    }
    // Optional wall-clock deadline for the whole training run.
    if (getMaxTime() > 0 && timer.elapsed() > getMaxTime()) {
      dlog(Severity::Warning) << "Deadline for the training reached. Training stopped.";
      break;
    }
    if (monitor)
      monitor->reportProgress(100. * (iEpoch + 1) / epochCount, (iEpoch < epochCount - 1) && getShowProgress());
  }
  if ((status = curandDestroyGenerator(gen)) != CURAND_STATUS_SUCCESS)
  {
    dlog(Severity::Error) << "Could not destroy random number generator: " << status;
    return;
  }
  // Publish the final filters ...
  {
    boost::shared_ptr<std::vector<boost::shared_ptr<host_tensor_t> > > debugFilters(
        new std::vector<boost::shared_ptr<host_tensor_t> >());
    for (unsigned i = 0; i < filterCount; ++i) {
      debugFilters->push_back(boost::shared_ptr<host_tensor_t> (new host_tensor_t(F[i])));
      // dlog(Severity::Message) << "Filter " << i + 1 << ": "
      //     << tbblas::dot(*debugFilters->at(i), *debugFilters->at(i));
    }
    data->setFilters(debugFilters);
  }
  // ... and write the learned filters back into the cloned model.
  for (unsigned i = 0; i < filterCount; ++i) {
    thrust::copy(F[i].begin(), F[i].end(), filters[i]->begin());
    // std::cout << "Filter " << i + 1 << ": " << tbblas::dot(F[i], F[i]) << std::endl;
  }
  crbm->setVisibleBias(b);
  dlog() << "VisibleBias: " << b << " and " << crbm->getVisibleBias();
  data->setModel(crbm);
}
}
}
|
489b2e343207fbf10a7dbc5284b24fcf9fa9aecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/crop_layer.hpp"
namespace caffe {
#ifdef USE_ROCM
// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
// One thread per line: copies `width` contiguous elements per line from src
// to dest. Consecutive lines start `src_inner_stride` (resp.
// `dest_inner_stride`) elements apart, which lets the caller crop the last
// dimension. `height` is accepted for symmetry with the OpenCL kernel but is
// not read here.
template <typename Dtype>
__global__ void copy_kernel(const int_tp n,
                            const int_tp height,
                            const int_tp width,
                            const int_tp src_inner_stride,
                            const int_tp dest_inner_stride,
                            const Dtype* src, Dtype* dest) {
  CUDA_KERNEL_LOOP(index, n) {
    const int_tp src_base  = index * src_inner_stride;
    const int_tp dest_base = index * dest_inner_stride;
    for (int_tp col = 0; col < width; ++col) {
      dest[dest_base + col] = src[src_base + col];
    }
  }
}
#endif // USE_ROCM
// Recursively copies the cropped window between bottom and top blobs on the
// GPU. Leading axes are walked one index at a time (recursion); once only the
// last two axes remain, the copy is issued as a single CUDA kernel launch or
// OpenCL enqueue covering `lines` rows of `width` elements each.
// `offsets` gives the crop offset per axis, `indices` the position reached so
// far on the leading axes, and `is_forward` selects data copy
// (bottom -> top) vs. diff copy (top -> bottom).
// NOTE(review): `src_data`/`dest_data` are not read in the base case — the
// branch re-derives pointers from the blobs; confirm the parameters are kept
// only for interface symmetry.
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
                                     const vector<Blob<Dtype>*>& top,
                                     const vector<int_tp>& offsets,
                                     vector<int_tp> indices,
                                     int_tp cur_dim,
                                     const Dtype* src_data, Dtype* dest_data,
                                     bool is_forward) {
  if (cur_dim + 2 < top[0]->num_axes()) {
    // We are not yet at the final dimension, call copy recursivley
    for (int_tp i = 0; i < top[0]->shape(cur_dim); ++i) {
      indices[cur_dim] = i;
      crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
                    src_data, dest_data, is_forward);
    }
  } else {
    // We are at the last two dimensions, which are stored continuously in
    // memory. With (N,C,H,W)
    // (0,1,2,3) cur_dim -> H
    //           cur_dim+1 -> W
    const int_tp lines = top[0]->shape(cur_dim);
    const int_tp height = top[0]->shape(cur_dim);
    const int_tp width = top[0]->shape(cur_dim+1);
    // Absolute source index: visited leading indices plus crop offsets.
    std::vector<int_tp> ind_off(cur_dim+2, 0);
    for (int_tp j = 0; j < cur_dim; ++j) {
      ind_off[j] = indices[j] + offsets[j];
    }
    ind_off[cur_dim] = offsets[cur_dim];
    ind_off[cur_dim+1] = offsets[cur_dim+1];
    // Compute copy strides: row lengths of the uncropped and cropped blobs.
    const int_tp src_inner_stride = bottom[0]->shape(cur_dim+1);
    const int_tp dest_inner_stride = top[0]->shape(cur_dim+1);
    if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
      if (is_forward) {
        const Dtype* bottom_data = bottom[0]->gpu_data() +
            bottom[0]->offset(ind_off);
        Dtype* top_data = top[0]->mutable_gpu_data() +
            top[0]->offset(indices);
        // NOLINT_NEXT_LINE(whitespace/operators)
        copy_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(lines),
                                CAFFE_CUDA_NUM_THREADS)(
            lines, height, width,
            src_inner_stride,
            dest_inner_stride,
            bottom_data, top_data);
      } else {
        // Backward: copy gradients from the cropped top into the matching
        // region of bottom (strides are therefore swapped).
        const Dtype* top_diff = top[0]->gpu_diff() +
            top[0]->offset(indices);
        Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
            bottom[0]->offset(ind_off);
        // NOLINT_NEXT_LINE(whitespace/operators)
        copy_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(lines),
                                CAFFE_CUDA_NUM_THREADS)(
            lines, height, width,
            dest_inner_stride,
            src_inner_stride,
            top_diff, bottom_diff);
      }
#endif  // USE_ROCM
    } else {
#ifdef USE_GREENTEA
      // OpenCL path: enqueue the prebuilt "crop_copy" kernel with raw cl_mem
      // handles plus element offsets (no pointer arithmetic on cl_mem).
      viennacl::ocl::context &ctx = viennacl::ocl::get_context(
          this->device_->id());
      viennacl::ocl::program &program = this->device_->template program<Dtype>();
      viennacl::ocl::kernel &oclk_copy_crop = program.get_kernel(
          CL_KERNEL_SELECT("crop_copy"));
      if (is_forward) {
        viennacl::ocl::enqueue(
            oclk_copy_crop(
                lines, height, width,
                src_inner_stride,
                dest_inner_stride,
                WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx),
                bottom[0]->offset(ind_off),
                WrapHandle((cl_mem)(top[0]->mutable_gpu_data()), &ctx),
                top[0]->offset(indices)),
            ctx.get_queue());
      } else {
        viennacl::ocl::enqueue(
            oclk_copy_crop(
                lines, height, width,
                dest_inner_stride,
                src_inner_stride,
                WrapHandle((cl_mem)(top[0]->gpu_diff()), &ctx),
                top[0]->offset(indices),
                WrapHandle((cl_mem)(bottom[0]->mutable_gpu_diff()), &ctx),
                bottom[0]->offset(ind_off)),
            ctx.get_queue());
      }
#endif  // USE_GREENTEA
    }
  }
}
// Forward pass: copy the cropped window of the bottom blob into the top blob.
template<typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                   const vector<Blob<Dtype>*>& top) {
  // The destination starts at index 0 along every axis of the cropped blob.
  std::vector<int_tp> indices(top[0]->num_axes(), 0);
  const Dtype* src = bottom[0]->gpu_data();
  Dtype* dst = top[0]->mutable_gpu_data();
  crop_copy_gpu(bottom, top, offsets, indices, 0, src, dst, true);
}
// Backward pass: zero the bottom gradient, then scatter the top gradient
// back into the cropped region of bottom. No-op if propagation is disabled.
template<typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                    const vector<bool>& propagate_down,
                                    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  if (propagate_down[0]) {
    // Clear the whole bottom diff first — only the cropped window receives
    // gradients from the copy below; everything else must be zero.
    if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
      caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
#endif
    } else {
#ifdef USE_GREENTEA
      greentea_gpu_set(this->device_->id(), bottom[0]->count(),
                       static_cast<Dtype>(0), (cl_mem) bottom_diff, 0);
#endif
    }
    std::vector<int_tp> indices(top[0]->num_axes(), 0);
    // is_forward == false: copy top diff -> bottom diff.
    crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
                  false);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe
| 489b2e343207fbf10a7dbc5284b24fcf9fa9aecc.cu | #include <vector>
#include "caffe/layers/crop_layer.hpp"
namespace caffe {
#ifdef USE_CUDA
// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
// One thread per line: copies `width` contiguous elements per line from src
// to dest. Consecutive lines start `src_inner_stride` (resp.
// `dest_inner_stride`) elements apart, which lets the caller crop the last
// dimension. `height` is accepted for symmetry with the OpenCL kernel but is
// not read here.
template <typename Dtype>
__global__ void copy_kernel(const int_tp n,
                            const int_tp height,
                            const int_tp width,
                            const int_tp src_inner_stride,
                            const int_tp dest_inner_stride,
                            const Dtype* src, Dtype* dest) {
  CUDA_KERNEL_LOOP(index, n) {
    const int_tp src_base  = index * src_inner_stride;
    const int_tp dest_base = index * dest_inner_stride;
    for (int_tp col = 0; col < width; ++col) {
      dest[dest_base + col] = src[src_base + col];
    }
  }
}
#endif // USE_CUDA
// Recursively copies the cropped window between bottom and top blobs on the
// GPU. Leading axes are walked one index at a time (recursion); once only the
// last two axes remain, the copy is issued as a single CUDA kernel launch or
// OpenCL enqueue covering `lines` rows of `width` elements each.
// `offsets` gives the crop offset per axis, `indices` the position reached so
// far on the leading axes, and `is_forward` selects data copy
// (bottom -> top) vs. diff copy (top -> bottom).
// NOTE(review): `src_data`/`dest_data` are not read in the base case — the
// branch re-derives pointers from the blobs; confirm the parameters are kept
// only for interface symmetry.
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
                                     const vector<Blob<Dtype>*>& top,
                                     const vector<int_tp>& offsets,
                                     vector<int_tp> indices,
                                     int_tp cur_dim,
                                     const Dtype* src_data, Dtype* dest_data,
                                     bool is_forward) {
  if (cur_dim + 2 < top[0]->num_axes()) {
    // We are not yet at the final dimension, call copy recursivley
    for (int_tp i = 0; i < top[0]->shape(cur_dim); ++i) {
      indices[cur_dim] = i;
      crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
                    src_data, dest_data, is_forward);
    }
  } else {
    // We are at the last two dimensions, which are stored continuously in
    // memory. With (N,C,H,W)
    // (0,1,2,3) cur_dim -> H
    //           cur_dim+1 -> W
    const int_tp lines = top[0]->shape(cur_dim);
    const int_tp height = top[0]->shape(cur_dim);
    const int_tp width = top[0]->shape(cur_dim+1);
    // Absolute source index: visited leading indices plus crop offsets.
    std::vector<int_tp> ind_off(cur_dim+2, 0);
    for (int_tp j = 0; j < cur_dim; ++j) {
      ind_off[j] = indices[j] + offsets[j];
    }
    ind_off[cur_dim] = offsets[cur_dim];
    ind_off[cur_dim+1] = offsets[cur_dim+1];
    // Compute copy strides: row lengths of the uncropped and cropped blobs.
    const int_tp src_inner_stride = bottom[0]->shape(cur_dim+1);
    const int_tp dest_inner_stride = top[0]->shape(cur_dim+1);
    if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
      if (is_forward) {
        const Dtype* bottom_data = bottom[0]->gpu_data() +
            bottom[0]->offset(ind_off);
        Dtype* top_data = top[0]->mutable_gpu_data() +
            top[0]->offset(indices);
        // NOLINT_NEXT_LINE(whitespace/operators)
        copy_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(lines),
                                CAFFE_CUDA_NUM_THREADS)(
            lines, height, width,
            src_inner_stride,
            dest_inner_stride,
            bottom_data, top_data);
      } else {
        // Backward: copy gradients from the cropped top into the matching
        // region of bottom (strides are therefore swapped).
        const Dtype* top_diff = top[0]->gpu_diff() +
            top[0]->offset(indices);
        Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
            bottom[0]->offset(ind_off);
        // NOLINT_NEXT_LINE(whitespace/operators)
        copy_kernel CUDA_KERNEL(CAFFE_GET_BLOCKS(lines),
                                CAFFE_CUDA_NUM_THREADS)(
            lines, height, width,
            dest_inner_stride,
            src_inner_stride,
            top_diff, bottom_diff);
      }
#endif  // USE_CUDA
    } else {
#ifdef USE_GREENTEA
      // OpenCL path: enqueue the prebuilt "crop_copy" kernel with raw cl_mem
      // handles plus element offsets (no pointer arithmetic on cl_mem).
      viennacl::ocl::context &ctx = viennacl::ocl::get_context(
          this->device_->id());
      viennacl::ocl::program &program = this->device_->template program<Dtype>();
      viennacl::ocl::kernel &oclk_copy_crop = program.get_kernel(
          CL_KERNEL_SELECT("crop_copy"));
      if (is_forward) {
        viennacl::ocl::enqueue(
            oclk_copy_crop(
                lines, height, width,
                src_inner_stride,
                dest_inner_stride,
                WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx),
                bottom[0]->offset(ind_off),
                WrapHandle((cl_mem)(top[0]->mutable_gpu_data()), &ctx),
                top[0]->offset(indices)),
            ctx.get_queue());
      } else {
        viennacl::ocl::enqueue(
            oclk_copy_crop(
                lines, height, width,
                dest_inner_stride,
                src_inner_stride,
                WrapHandle((cl_mem)(top[0]->gpu_diff()), &ctx),
                top[0]->offset(indices),
                WrapHandle((cl_mem)(bottom[0]->mutable_gpu_diff()), &ctx),
                bottom[0]->offset(ind_off)),
            ctx.get_queue());
      }
#endif  // USE_GREENTEA
    }
  }
}
// Forward pass: copy the cropped window of the bottom blob into the top blob.
template<typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                   const vector<Blob<Dtype>*>& top) {
  // The destination starts at index 0 along every axis of the cropped blob.
  std::vector<int_tp> indices(top[0]->num_axes(), 0);
  const Dtype* src = bottom[0]->gpu_data();
  Dtype* dst = top[0]->mutable_gpu_data();
  crop_copy_gpu(bottom, top, offsets, indices, 0, src, dst, true);
}
// Backward pass: zero the bottom gradient, then scatter the top gradient
// back into the cropped region of bottom. No-op if propagation is disabled.
template<typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                    const vector<bool>& propagate_down,
                                    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  if (propagate_down[0]) {
    // Clear the whole bottom diff first — only the cropped window receives
    // gradients from the copy below; everything else must be zero.
    if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
      caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
#endif
    } else {
#ifdef USE_GREENTEA
      greentea_gpu_set(this->device_->id(), bottom[0]->count(),
                       static_cast<Dtype>(0), (cl_mem) bottom_diff, 0);
#endif
    }
    std::vector<int_tp> indices(top[0]->num_axes(), 0);
    // is_forward == false: copy top diff -> bottom diff.
    crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
                  false);
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe
|
6143a67d7039705dce6d89ebe243fe26e33902b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
// Converts an RGBA image to single-channel greyscale, one thread per pixel.
// Greyscale intensity follows the NTSC weighting:
//   I = .299f * R + .587f * G + .114f * B   (alpha is ignored).
// Expects a 2D launch where x indexes columns and y indexes rows; threads
// outside the image bounds return without touching memory.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard: the grid is rounded up, so edge blocks overhang the image.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  const uchar4 pixel = rgbaImage[idx];
  // .x -> R, .y -> G, .z -> B (.w, alpha, is unused).
  const float intensity = .299f * pixel.x + .587f * pixel.y + .114f * pixel.z;
  greyImage[idx] = intensity;
}
// Host wrapper: launches rgba_to_greyscale over the whole image.
// d_rgbaImage / d_greyImage are device buffers of numRows*numCols pixels;
// h_rgbaImage is unused here (kept for the assignment's interface).
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  // 16x16 threads per block; grid rounded up so every pixel is covered
  // (the kernel bounds-checks the overhang).
  const dim3 blockSize(16, 16, 1); //TODO
  const dim3 gridSize( numCols/blockSize.x + 1, numRows/blockSize.y + 1, 1); //TODO
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  // Block until the kernel finishes, then surface any launch/runtime error.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 6143a67d7039705dce6d89ebe243fe26e33902b2.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int c = (blockIdx.x * blockDim.x) + threadIdx.x;
int r = (blockIdx.y * blockDim.y) + threadIdx.y;
if ((c < numCols) && (r < numRows)) {
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16, 1); //TODO
const dim3 gridSize( numCols/blockSize.x + 1, numRows/blockSize.y + 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
1e2f373bedb5eb4bd7f859a82e3356f8fdfcd970.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <iomanip>
#include <cstring>
#include <cmath>
#include <stdlib.h>
#include<sys/time.h>
#define BLOCKS 16
#define THREADS 16
using namespace std;
void run_sort(void);
int compSuffixes(char *suffix1, char *suffix2, int length);
//-----------------------DO NOT CHANGE NAMES, ONLY MODIFY VALUES--------------------------------------------
//Final Values that will be compared for correctness
//You may change the function prototypes and definitions, but you need to present final results in these arrays
//-----------------------------Structures for correctness check-------------------
int **SA_Final_student;
int **L_counts_student;
char *L_student;
int F_counts_student[]={0,0,0,0};
int num_value=0;
int read_count = 0;
int read_length = 0;
// One step of a bitonic sorting network over an array of string pointers.
// Thread i compares rows i and i^j and swaps the *pointers* so that the pair
// is ordered ascending when (i & k) == 0 and descending otherwise. Rows are
// compared lexicographically over their first num_value characters.
//
// Fixes over the previous version:
//  * The comparison broke out of the character loop only when a swap was
//    needed; when the first differing character already proved the pair was
//    in order, the loop kept scanning later characters and could still swap
//    (e.g. "AB" vs "BA" was swapped in the ascending branch). The comparison
//    now stops at the first differing character in both directions.
//  * Removed stray debug printf() calls.
__global__ void bitonic_sort_step(char **dev_values, int j, int k, int num_value){
  unsigned int i, ixj; /* Sorting partners: i and ixj */
  i = threadIdx.x + blockDim.x * blockIdx.x;
  ixj = i^j;
  /* The threads with the lowest ids sort the array. */
  if ((ixj) > i) {
    /* Lexicographic comparison: find the first differing character. */
    int cmp = 0;
    for (int m = 0; m < num_value; m++) {
      if (dev_values[i][m] != dev_values[ixj][m]) {
        cmp = (dev_values[i][m] > dev_values[ixj][m]) ? 1 : -1;
        break;
      }
    }
    /* (i & k) == 0 -> this pair must end up ascending, otherwise descending. */
    const bool ascending = ((i & k) == 0);
    if ((ascending && cmp > 0) || (!ascending && cmp < 0)) {
      /* exchange(i, ixj): swap the row pointers, not the row contents */
      char* temp = dev_values[i];
      dev_values[i] = dev_values[ixj];
      dev_values[ixj] = temp;
    }
  }
}
void bitonic_sort(char **values){
char **dev_values;
size_t size = num_value * sizeof(char);
hipMalloc((void***) &dev_values, read_count);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
for(int i=0;i<read_count;i++){
hipMalloc((void**) &(dev_values[i]), size);
hipMemcpy(dev_values[i], values[i], size, hipMemcpyHostToDevice);
}
dim3 blocks(BLOCKS,1); /* Number of blocks */
dim3 threads(THREADS,1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= num_value; k <<= 1) {
/* Minor step */
for (j=k>>1; j>0; j=j>>1) {
hipLaunchKernelGGL(( bitonic_sort_step), dim3(blocks), dim3(threads), 0, 0, dev_values, j, k, num_value);
}
}
<<<<<<< HEAD
}
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
cout<<"========================="<<endl;
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
//printf("1dev=%s",dev_values);
//cout<<"dev="<<dev_values<<endl;
//for(int i=0; i<num_value; ++i){
//for(int j=0; j<num_value; ++j)
//cout<<dev_values[j][i];
//cout<<endl;
// }
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
/*for(int i=0;i<sizeof(dev_values);i++){
cout<<"dev="<<dev_values[i]<<endl;
}*/
//cout<<"dev="<<dev_values[0]<<endl;
hipFree(dev_values);
=======
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
cout<<"========================="<<endl;
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
hipFree(dev_values);
>>>>>>> 7418d3a441adee75c498344c34d2a845d2585ee1
}
//Calculates the final FM-Index
// Builds an FM-index from the per-read suffix arrays:
//  * fills the global SA_Final_student with (suffix offset, read index) pairs,
//  * sorts all suffixes of all reads with bitonic_sort,
//  * fills F_count with run lengths of the first column ($, A, C, G, T order),
//  * writes the last column into L_student and returns cumulative per-symbol
//    counts (A,C,G,T) of the last column as L_count.
int** makeFMIndex_student(char ***suffixes, int read_count, int read_length, int F_count[], char *L_student){
  int i, j;
  SA_Final_student=(int**)malloc(read_count*read_length*sizeof(int*));
  for(i=0;i<read_count*read_length;i++)
    SA_Final_student[i]=(int*)malloc(2*sizeof(int));
  //Temporary storage for collecting together all suffixes
  char **temp_suffixes=(char**)malloc(read_count*read_length*sizeof(char*));
  //Initialization of temporary storage
  for(i=0;i<read_count;i++){
    for(j=0;j<read_length;j++){
      temp_suffixes[i*read_length+j]=(char*)malloc(num_value*sizeof(char));
      // NOTE(review): this copies read_length BYTES starting at the address
      // of the pointer suffixes[i][j] into the pointer slot itself (and the
      // slots after it) — it aliases pointers rather than copying characters,
      // and leaks the malloc on the line above. Looks like
      // memcpy(temp_suffixes[...], suffixes[i][j], ...) was intended — confirm.
      memcpy(&temp_suffixes[i*read_length+j], &suffixes[i][j],read_length*sizeof(char));
      SA_Final_student[i*read_length+j][0]=j;
      SA_Final_student[i*read_length+j][1]=i;
    }
  }
  char *temp=(char*)malloc(read_length*sizeof(char));
  // L_count[i][s] = number of occurrences of symbol s (A,C,G,T) among the
  // last-column characters L_student[0..i].
  int **L_count=(int**)malloc(read_length*read_count*sizeof(int*));
  for(i=0;i<read_length*read_count;i++){
    L_count[i]=(int*)malloc(4*sizeof(int));
    for(j=0;j<4;j++){
      L_count[i][j]=0;
    }
  }
  //run_sort();
  //Focus on improving this for evaluation purpose
  //Sorting of suffixes (previous CPU bubble sort kept for reference):
  /*for(i=0;i<read_count*read_length-1;i++){
  for(j=0;j<read_count*read_length-i-1;j++){
  if(compSuffixes(temp_suffixes[j], temp_suffixes[j+1], read_length)>0){
  memcpy(temp, temp_suffixes[j], read_length*sizeof(char));
  memcpy(temp_suffixes[j], temp_suffixes[j+1], read_length*sizeof(char));
  memcpy(temp_suffixes[j+1], temp, read_length*sizeof(char));
  int temp_int = SA_Final_student[j][0];
  SA_Final_student[j][0]=SA_Final_student[j+1][0];
  SA_Final_student[j+1][0]=temp_int;
  temp_int = SA_Final_student[j][1];
  SA_Final_student[j][1]=SA_Final_student[j+1][1];
  SA_Final_student[j+1][1]=temp_int;
  }
  }
  }*/
  // NOTE(review): the GPU sort reorders temp_suffixes but, unlike the bubble
  // sort above, does NOT permute SA_Final_student alongside — confirm.
  bitonic_sort(temp_suffixes);
  free(temp);
  char this_F = '$';
  j=0;
  //Calculation of F_count's: run lengths of the sorted first column.
  for(i=0;i<read_count*read_length;i++){
    int count=0;
    while(temp_suffixes[i][0]==this_F){
      count++;i++;
    }
    // NOTE(review): j is both incremented and read in this expression; the
    // order is unspecified before C++17 — confirm intended semantics.
    F_count[j++]=j==0?count:count+1;
    this_F = temp_suffixes[i][0];
    if(temp_suffixes[i][0]=='T')
      break;
  }
  //Calculation of L_student's and L_count's (last column + running counts).
  for(i=0;i<read_count*read_length;i++){
    char ch = temp_suffixes[i][read_length-1];
    L_student[i]=ch;
    if(i>0){
      // Carry forward the cumulative counts from the previous row.
      for(int k=0;k<4;k++)
        L_count[i][k]=L_count[i-1][k];
    }
    if(ch=='A')
      L_count[i][0]++;
    else if(ch=='C')
      L_count[i][1]++;
    else if(ch=='G')
      L_count[i][2]++;
    else if(ch=='T')
      L_count[i][3]++;
  }
  return L_count;
}
//--------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------
//-----------------------DO NOT CHANGE--------------------------------------------
//int read_count = 0;
//int read_length = 0;
int **SA_Final;
int **L_counts;
char *L;
int F_counts[]={0,0,0,0};
//Read file to get reads
char** inputReads(char *file_path, int *read_count, int *length){
FILE *read_file = fopen(file_path, "r");
int ch, lines=0;
char **reads;
do
{
ch = fgetc(read_file);
if (ch == '\n')
lines++;
} while (ch != EOF);
rewind(read_file);
reads=(char**)malloc(lines*sizeof(char*));
*read_count = lines;
int i = 0;
size_t len = 0;
for(i = 0; i < lines; i++)
{
reads[i] = NULL;
len = 0;
getline(&reads[i], &len, read_file);
}
fclose(read_file);
int j=0;
while(reads[0][j]!='\n')
j++;
*length = j+1;
for(i=0;i<lines;i++)
reads[i][j]='$';
int temp = log2((float)*length);
num_value = pow(2,temp);
return reads;
}
//Check correctness of values
int checker(){
int correct = 1;
for(int i=0; i<read_count*read_length;i++){
if(L_student[i]!=L[i]){
//cout<<"L_student[i]!=L[i]"<<endl;
correct = 0;
}
for(int j=0;j<2;j++){
if(SA_Final_student[i][j]!=SA_Final[i][j]){
//cout<<"SA_Final_student[i][j]!=SA_Final[i][j]"<<endl;
//cout<<SA_Final_student[i][j]<<" "<<SA_Final[i][j]<<endl;
correct = 0;
}
}
for(int j=0;j<4;j++){
if(L_counts_student[i][j]!=L_counts[i][j]){
//cout<<"L_counts_student[i][j]!=L_counts[i][j]"<<endl;
correct = 0;
}
}
}
for(int i=0;i<4;i++){
if(F_counts_student[i]!=F_counts[i]){
//cout<<"F_counts_student[i]!=F_counts[i]"<<endl;
correct = 0;
}
}
return correct;
}
//Rotate read by 1 character
void rotateRead(char *read, char *rotatedRead, int length){
for(int i=0;i<length-1;i++)
rotatedRead[i]=read[i+1];
rotatedRead[length-1]=read[0];
}
//Generate Sufixes and their SA's for a read
char** generateSuffixes(char *read, int length, int read_id){
char **suffixes=(char**)malloc(length*sizeof(char*));
suffixes[0]=(char*)malloc(length*sizeof(char));
for(int j=0;j<length;j++)
suffixes[0][j]=read[j];
for(int i=1;i<length;i++){
suffixes[i]=(char*)malloc(length*sizeof(char));
rotateRead(suffixes[i-1], suffixes[i], length);
}
return suffixes;
}
//Comparator for Suffixes
int compSuffixes(char *suffix1, char *suffix2, int length){
int ret = 0;
for(int i=0;i<length;i++){
if(suffix1[i]>suffix2[i])
return 1;
else if(suffix1[i]<suffix2[i])
return -1;
}
return ret;
}
//Calculates the final FM-Index
int** makeFMIndex(char ***suffixes, int read_count, int read_length, int F_count[], char *L){
int i, j;
SA_Final=(int**)malloc(read_count*read_length*sizeof(int*));
for(i=0;i<read_count*read_length;i++)
SA_Final[i]=(int*)malloc(2*sizeof(int));
//Temporary storage for collecting together all suffixes
char **temp_suffixes=(char**)malloc(read_count*read_length*sizeof(char*));
//Initalization of temporary storage
for(i=0;i<read_count;i++){
for(j=0;j<read_length;j++){
temp_suffixes[i*read_length+j]=(char*)malloc(read_length*sizeof(char));
memcpy(&temp_suffixes[i*read_length+j], &suffixes[i][j],read_length*sizeof(char));
SA_Final[i*read_length+j][0]=j;
SA_Final[i*read_length+j][1]=i;
}
}
char *temp=(char*)malloc(read_length*sizeof(char));
int **L_count=(int**)malloc(read_length*read_count*sizeof(int*));
for(i=0;i<read_length*read_count;i++){
L_count[i]=(int*)malloc(4*sizeof(int));
for(j=0;j<4;j++){
L_count[i][j]=0;
}
}
//Focus on improving this for evaluation purpose
//Sorting of suffixes
for(i=0;i<read_count*read_length-1;i++){
for(j=0;j<read_count*read_length-i-1;j++){
if(compSuffixes(temp_suffixes[j], temp_suffixes[j+1], read_length)>0){
memcpy(temp, temp_suffixes[j], read_length*sizeof(char));
memcpy(temp_suffixes[j], temp_suffixes[j+1], read_length*sizeof(char));
memcpy(temp_suffixes[j+1], temp, read_length*sizeof(char));
int temp_int = SA_Final[j][0];
SA_Final[j][0]=SA_Final[j+1][0];
SA_Final[j+1][0]=temp_int;
temp_int = SA_Final[j][1];
SA_Final[j][1]=SA_Final[j+1][1];
SA_Final[j+1][1]=temp_int;
}
}
}
free(temp);
char this_F = '$';
j=0;
//Calculation of F_count's
for(i=0;i<read_count*read_length;i++){
int count=0;
while(temp_suffixes[i][0]==this_F){
count++;i++;
}
F_count[j++]=j==0?count:count+1;
this_F = temp_suffixes[i][0];
if(temp_suffixes[i][0]=='T')
break;
}
//Calculation of L's and L_count's
for(i=0;i<read_count*read_length;i++){
char ch = temp_suffixes[i][read_length-1];
L[i]=ch;
if(i>0){
for(int k=0;k<4;k++)
L_count[i][k]=L_count[i-1][k];
}
if(ch=='A')
L_count[i][0]++;
else if(ch=='C')
L_count[i][1]++;
else if(ch=='G')
L_count[i][2]++;
else if(ch=='T')
L_count[i][3]++;
}
return L_count;
}
//-----------------------DO NOT CHANGE--------------------------------------------
int main(int argc, char *argv[]){
char **reads = inputReads(argv[1], &read_count, &read_length);//Input reads from file
char ***suffixes=(char***)malloc(read_count*sizeof(char**));//Storage for read-wise suffixes
//-----------------------------Structures for correctness check----------------------------------------------
L=(char*)malloc(read_count*read_length*sizeof(char*));//Final storage for last column of sorted suffixes
L_student=(char*)malloc(read_count*read_length*sizeof(char*));//Final storage for last column of sorted suffixes
//-----------------------------Structures for correctness check----------------------------------------------
//-----------Default implementation----------------
//-----------Time capture start--------------------
struct timeval TimeValue_Start;
struct timeval TimeValue_Final;
struct timezone TimeZone_Start;
struct timezone TimeZone_Final;
long time_start, time_end;
double time_overhead_default, time_overhead_student;
gettimeofday(&TimeValue_Start, &TimeZone_Start);
//Generate read-wise suffixes
for(int i=0;i<read_count;i++){
suffixes[i]=generateSuffixes(reads[i], read_length, i);
}
//Calculate finl FM-Index
L_counts = makeFMIndex(suffixes, read_count, read_length, F_counts, L);
gettimeofday(&TimeValue_Final, &TimeZone_Final);
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead_default = (time_end - time_start)/1000000.0;
//------------Time capture end----------------------
//--------------------------------------------------
//-----------Your implementations------------------
gettimeofday(&TimeValue_Start, &TimeZone_Start);
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
//-----------Call your functions here--------------------
//Generate read-wise suffixes
for(int i=0;i<read_count;i++){
suffixes[i]=generateSuffixes(reads[i], read_length, i);
}
//Calculate finl FM-Index
L_counts_student = makeFMIndex_student(suffixes, read_count, read_length, F_counts_student, L_student);
//-----------Call your functions here--------------------
gettimeofday(&TimeValue_Final, &TimeZone_Final);
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead_student = (time_end - time_start)/1000000.0;
//--------------------------------------------------
//----------------For debug purpose only-----------------
//for(int i=0;i<read_count*read_length;i++)
// cout<<L[i]<<"\t"<<SA_Final[i][0]<<","<<SA_Final[i][1]<<"\t"<<L_counts[i][0]<<","<<L_counts[i][1]<<","<<L_counts[i][2]<<","<<L_counts[i][3]<<endl;
//--------------------------------------------------
//---------------Correction check and speedup calculation----------------------
float speedup=0.0;
if(checker()==1)
speedup = time_overhead_default/time_overhead_student;
else
cout<<"X"<<endl;
cout<<"time_overhead_default="<<time_overhead_default<<endl;
cout<<"time_overhead_student="<<time_overhead_student<<endl;
cout<<"Speedup="<<speedup<<endl;
//-----------------------------------------------------------------------------
return 0;
}
| 1e2f373bedb5eb4bd7f859a82e3356f8fdfcd970.cu | #include <iostream>
#include <fstream>
#include <iomanip>
#include <cstring>
#include <cmath>
#include <stdlib.h>
#include<sys/time.h>
#define BLOCKS 16
#define THREADS 16
using namespace std;
void run_sort(void);
int compSuffixes(char *suffix1, char *suffix2, int length);
//-----------------------DO NOT CHANGE NAMES, ONLY MODIFY VALUES--------------------------------------------
//Final Values that will be compared for correctness
//You may change the function prototypes and definitions, but you need to present final results in these arrays
//-----------------------------Structures for correctness check-------------------
int **SA_Final_student;
int **L_counts_student;
char *L_student;
int F_counts_student[]={0,0,0,0};
int num_value=0;
int read_count = 0;
int read_length = 0;
__global__ void bitonic_sort_step(char **dev_values, int j, int k, int num_value){
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
//printf("1dev="<<dev_values[0]<<endl;
//printf("gfdgfdgdsfg\n");
//printf("gfdgfdgdsfg\n 1dev=%s",dev_values[0]);
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
printf("1110");
for(int m=0;m<num_value;m++){
if (dev_values[i][m]>dev_values[ixj][m]) {
printf("2222");
/* exchange(i,ixj); */
char* temp;
temp=dev_values[i];
dev_values[i]=dev_values[ixj];
dev_values[ixj]=temp;
break;
}
}
}
if ((i&k)!=0) {
/* Sort descending */
for(int m=0;m<num_value;m++){
if (dev_values[i][m]<dev_values[ixj][m]) {
printf("2222");
/* exchange(i,ixj); */
char* temp;
temp=dev_values[i];
dev_values[i]=dev_values[ixj];
dev_values[ixj]=temp;
break;
}
}
}
}
}
void bitonic_sort(char **values){
char **dev_values;
size_t size = num_value * sizeof(char);
cudaMalloc((void***) &dev_values, read_count);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
for(int i=0;i<read_count;i++){
cudaMalloc((void**) &(dev_values[i]), size);
cudaMemcpy(dev_values[i], values[i], size, cudaMemcpyHostToDevice);
}
dim3 blocks(BLOCKS,1); /* Number of blocks */
dim3 threads(THREADS,1); /* Number of threads */
int j, k;
/* Major step */
for (k = 2; k <= num_value; k <<= 1) {
/* Minor step */
for (j=k>>1; j>0; j=j>>1) {
bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k, num_value);
}
}
<<<<<<< HEAD
}
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
cout<<"========================="<<endl;
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
//printf("1dev=%s",dev_values);
//cout<<"dev="<<dev_values<<endl;
//for(int i=0; i<num_value; ++i){
//for(int j=0; j<num_value; ++j)
//cout<<dev_values[j][i];
//cout<<endl;
// }
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
/*for(int i=0;i<sizeof(dev_values);i++){
cout<<"dev="<<dev_values[i]<<endl;
}*/
//cout<<"dev="<<dev_values[0]<<endl;
cudaFree(dev_values);
=======
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
cout<<"========================="<<endl;
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
for(int i=0;i<sizeof(values);i++)
cout<<"values="<<values[i]<<endl;
cudaFree(dev_values);
>>>>>>> 7418d3a441adee75c498344c34d2a845d2585ee1
}
//Calculates the final FM-Index
int** makeFMIndex_student(char ***suffixes, int read_count, int read_length, int F_count[], char *L_student){
int i, j;
SA_Final_student=(int**)malloc(read_count*read_length*sizeof(int*));
for(i=0;i<read_count*read_length;i++)
SA_Final_student[i]=(int*)malloc(2*sizeof(int));
//Temporary storage for collecting together all suffixes
char **temp_suffixes=(char**)malloc(read_count*read_length*sizeof(char*));
//Initalization of temporary storage
for(i=0;i<read_count;i++){
for(j=0;j<read_length;j++){
temp_suffixes[i*read_length+j]=(char*)malloc(num_value*sizeof(char));
memcpy(&temp_suffixes[i*read_length+j], &suffixes[i][j],read_length*sizeof(char));
SA_Final_student[i*read_length+j][0]=j;
SA_Final_student[i*read_length+j][1]=i;
}
}
char *temp=(char*)malloc(read_length*sizeof(char));
int **L_count=(int**)malloc(read_length*read_count*sizeof(int*));
for(i=0;i<read_length*read_count;i++){
L_count[i]=(int*)malloc(4*sizeof(int));
for(j=0;j<4;j++){
L_count[i][j]=0;
}
}
//run_sort();
//Focus on improving this for evaluation purpose
//Sorting of suffixes
/*for(i=0;i<read_count*read_length-1;i++){
for(j=0;j<read_count*read_length-i-1;j++){
if(compSuffixes(temp_suffixes[j], temp_suffixes[j+1], read_length)>0){
memcpy(temp, temp_suffixes[j], read_length*sizeof(char));
memcpy(temp_suffixes[j], temp_suffixes[j+1], read_length*sizeof(char));
memcpy(temp_suffixes[j+1], temp, read_length*sizeof(char));
int temp_int = SA_Final_student[j][0];
SA_Final_student[j][0]=SA_Final_student[j+1][0];
SA_Final_student[j+1][0]=temp_int;
temp_int = SA_Final_student[j][1];
SA_Final_student[j][1]=SA_Final_student[j+1][1];
SA_Final_student[j+1][1]=temp_int;
}
}
}*/
bitonic_sort(temp_suffixes);
free(temp);
char this_F = '$';
j=0;
//Calculation of F_count's
for(i=0;i<read_count*read_length;i++){
int count=0;
while(temp_suffixes[i][0]==this_F){
count++;i++;
}
F_count[j++]=j==0?count:count+1;
this_F = temp_suffixes[i][0];
if(temp_suffixes[i][0]=='T')
break;
}
//Calculation of L_student's and L_count's
for(i=0;i<read_count*read_length;i++){
char ch = temp_suffixes[i][read_length-1];
L_student[i]=ch;
if(i>0){
for(int k=0;k<4;k++)
L_count[i][k]=L_count[i-1][k];
}
if(ch=='A')
L_count[i][0]++;
else if(ch=='C')
L_count[i][1]++;
else if(ch=='G')
L_count[i][2]++;
else if(ch=='T')
L_count[i][3]++;
}
return L_count;
}
//--------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------
//-----------------------DO NOT CHANGE--------------------------------------------
//int read_count = 0;
//int read_length = 0;
int **SA_Final;
int **L_counts;
char *L;
int F_counts[]={0,0,0,0};
//Read file to get reads
char** inputReads(char *file_path, int *read_count, int *length){
FILE *read_file = fopen(file_path, "r");
int ch, lines=0;
char **reads;
do
{
ch = fgetc(read_file);
if (ch == '\n')
lines++;
} while (ch != EOF);
rewind(read_file);
reads=(char**)malloc(lines*sizeof(char*));
*read_count = lines;
int i = 0;
size_t len = 0;
for(i = 0; i < lines; i++)
{
reads[i] = NULL;
len = 0;
getline(&reads[i], &len, read_file);
}
fclose(read_file);
int j=0;
while(reads[0][j]!='\n')
j++;
*length = j+1;
for(i=0;i<lines;i++)
reads[i][j]='$';
int temp = log2((float)*length);
num_value = pow(2,temp);
return reads;
}
//Check correctness of values
int checker(){
int correct = 1;
for(int i=0; i<read_count*read_length;i++){
if(L_student[i]!=L[i]){
//cout<<"L_student[i]!=L[i]"<<endl;
correct = 0;
}
for(int j=0;j<2;j++){
if(SA_Final_student[i][j]!=SA_Final[i][j]){
//cout<<"SA_Final_student[i][j]!=SA_Final[i][j]"<<endl;
//cout<<SA_Final_student[i][j]<<" "<<SA_Final[i][j]<<endl;
correct = 0;
}
}
for(int j=0;j<4;j++){
if(L_counts_student[i][j]!=L_counts[i][j]){
//cout<<"L_counts_student[i][j]!=L_counts[i][j]"<<endl;
correct = 0;
}
}
}
for(int i=0;i<4;i++){
if(F_counts_student[i]!=F_counts[i]){
//cout<<"F_counts_student[i]!=F_counts[i]"<<endl;
correct = 0;
}
}
return correct;
}
//Rotate read by 1 character
void rotateRead(char *read, char *rotatedRead, int length){
for(int i=0;i<length-1;i++)
rotatedRead[i]=read[i+1];
rotatedRead[length-1]=read[0];
}
//Generate Sufixes and their SA's for a read
char** generateSuffixes(char *read, int length, int read_id){
char **suffixes=(char**)malloc(length*sizeof(char*));
suffixes[0]=(char*)malloc(length*sizeof(char));
for(int j=0;j<length;j++)
suffixes[0][j]=read[j];
for(int i=1;i<length;i++){
suffixes[i]=(char*)malloc(length*sizeof(char));
rotateRead(suffixes[i-1], suffixes[i], length);
}
return suffixes;
}
//Comparator for Suffixes
int compSuffixes(char *suffix1, char *suffix2, int length){
int ret = 0;
for(int i=0;i<length;i++){
if(suffix1[i]>suffix2[i])
return 1;
else if(suffix1[i]<suffix2[i])
return -1;
}
return ret;
}
//Calculates the final FM-Index
int** makeFMIndex(char ***suffixes, int read_count, int read_length, int F_count[], char *L){
int i, j;
SA_Final=(int**)malloc(read_count*read_length*sizeof(int*));
for(i=0;i<read_count*read_length;i++)
SA_Final[i]=(int*)malloc(2*sizeof(int));
//Temporary storage for collecting together all suffixes
char **temp_suffixes=(char**)malloc(read_count*read_length*sizeof(char*));
//Initalization of temporary storage
for(i=0;i<read_count;i++){
for(j=0;j<read_length;j++){
temp_suffixes[i*read_length+j]=(char*)malloc(read_length*sizeof(char));
memcpy(&temp_suffixes[i*read_length+j], &suffixes[i][j],read_length*sizeof(char));
SA_Final[i*read_length+j][0]=j;
SA_Final[i*read_length+j][1]=i;
}
}
char *temp=(char*)malloc(read_length*sizeof(char));
int **L_count=(int**)malloc(read_length*read_count*sizeof(int*));
for(i=0;i<read_length*read_count;i++){
L_count[i]=(int*)malloc(4*sizeof(int));
for(j=0;j<4;j++){
L_count[i][j]=0;
}
}
//Focus on improving this for evaluation purpose
//Sorting of suffixes
for(i=0;i<read_count*read_length-1;i++){
for(j=0;j<read_count*read_length-i-1;j++){
if(compSuffixes(temp_suffixes[j], temp_suffixes[j+1], read_length)>0){
memcpy(temp, temp_suffixes[j], read_length*sizeof(char));
memcpy(temp_suffixes[j], temp_suffixes[j+1], read_length*sizeof(char));
memcpy(temp_suffixes[j+1], temp, read_length*sizeof(char));
int temp_int = SA_Final[j][0];
SA_Final[j][0]=SA_Final[j+1][0];
SA_Final[j+1][0]=temp_int;
temp_int = SA_Final[j][1];
SA_Final[j][1]=SA_Final[j+1][1];
SA_Final[j+1][1]=temp_int;
}
}
}
free(temp);
char this_F = '$';
j=0;
//Calculation of F_count's
for(i=0;i<read_count*read_length;i++){
int count=0;
while(temp_suffixes[i][0]==this_F){
count++;i++;
}
F_count[j++]=j==0?count:count+1;
this_F = temp_suffixes[i][0];
if(temp_suffixes[i][0]=='T')
break;
}
//Calculation of L's and L_count's
for(i=0;i<read_count*read_length;i++){
char ch = temp_suffixes[i][read_length-1];
L[i]=ch;
if(i>0){
for(int k=0;k<4;k++)
L_count[i][k]=L_count[i-1][k];
}
if(ch=='A')
L_count[i][0]++;
else if(ch=='C')
L_count[i][1]++;
else if(ch=='G')
L_count[i][2]++;
else if(ch=='T')
L_count[i][3]++;
}
return L_count;
}
//-----------------------DO NOT CHANGE--------------------------------------------
int main(int argc, char *argv[]){
char **reads = inputReads(argv[1], &read_count, &read_length);//Input reads from file
char ***suffixes=(char***)malloc(read_count*sizeof(char**));//Storage for read-wise suffixes
//-----------------------------Structures for correctness check----------------------------------------------
L=(char*)malloc(read_count*read_length*sizeof(char*));//Final storage for last column of sorted suffixes
L_student=(char*)malloc(read_count*read_length*sizeof(char*));//Final storage for last column of sorted suffixes
//-----------------------------Structures for correctness check----------------------------------------------
//-----------Default implementation----------------
//-----------Time capture start--------------------
struct timeval TimeValue_Start;
struct timeval TimeValue_Final;
struct timezone TimeZone_Start;
struct timezone TimeZone_Final;
long time_start, time_end;
double time_overhead_default, time_overhead_student;
gettimeofday(&TimeValue_Start, &TimeZone_Start);
//Generate read-wise suffixes
for(int i=0;i<read_count;i++){
suffixes[i]=generateSuffixes(reads[i], read_length, i);
}
//Calculate finl FM-Index
L_counts = makeFMIndex(suffixes, read_count, read_length, F_counts, L);
gettimeofday(&TimeValue_Final, &TimeZone_Final);
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead_default = (time_end - time_start)/1000000.0;
//------------Time capture end----------------------
//--------------------------------------------------
//-----------Your implementations------------------
gettimeofday(&TimeValue_Start, &TimeZone_Start);
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
//-----------Call your functions here--------------------
//Generate read-wise suffixes
for(int i=0;i<read_count;i++){
suffixes[i]=generateSuffixes(reads[i], read_length, i);
}
//Calculate finl FM-Index
L_counts_student = makeFMIndex_student(suffixes, read_count, read_length, F_counts_student, L_student);
//-----------Call your functions here--------------------
gettimeofday(&TimeValue_Final, &TimeZone_Final);
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead_student = (time_end - time_start)/1000000.0;
//--------------------------------------------------
//----------------For debug purpose only-----------------
//for(int i=0;i<read_count*read_length;i++)
// cout<<L[i]<<"\t"<<SA_Final[i][0]<<","<<SA_Final[i][1]<<"\t"<<L_counts[i][0]<<","<<L_counts[i][1]<<","<<L_counts[i][2]<<","<<L_counts[i][3]<<endl;
//--------------------------------------------------
//---------------Correction check and speedup calculation----------------------
float speedup=0.0;
if(checker()==1)
speedup = time_overhead_default/time_overhead_student;
else
cout<<"X"<<endl;
cout<<"time_overhead_default="<<time_overhead_default<<endl;
cout<<"time_overhead_student="<<time_overhead_student<<endl;
cout<<"Speedup="<<speedup<<endl;
//-----------------------------------------------------------------------------
return 0;
}
|
ad971c91be06db21039dfb375166476405e2b070.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "needle.h"
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
__device__ __host__ int
maximum( int a,
int b,
int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
__global__ void
needle_cuda_shared_1( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx;
int b_index_y = i - 1 - bx;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
int t0;
if (tx == 0)
t0 = matrix_cuda[index_nw];
int t1 = matrix_cuda[index_w + cols * tx];
int t2 = matrix_cuda[index_n];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
temp[tx + 1][0] = t1;
temp[0][tx + 1] = t2;
if (tx == 0) {
temp[0][0] = t0;
}
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
unsigned mask = __ballot_sync(0xffffffff, threadIdx.x <= m);
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
// be careful because of the blocksize
__syncwarp(mask);
}
// for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
// unsigned mask = __ballot_sync(0xffffffff, threadIdx.x <= m);
// if ( tx <= m){
// int t_index_x = tx + BLOCK_SIZE - m ;
// int t_index_y = BLOCK_SIZE - tx;
// // int tmp = temp[t_index_y][t_index_x-1];
// int tmp = temp[t_index_y-1][t_index_x];
// // __syncwarp(mask);
// int tmp2 = __shfl_up_sync(mask, tmp, 1);
// // __syncwarp(mask);
// temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
// tmp2 - penalty,
// tmp- penalty);
// }
// __syncthreads();
// }
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
unsigned mask = __ballot_sync(0xffffffff, threadIdx.x <= m);
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
// __syncthreads();
__syncwarp(mask);
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
__global__ void
needle_cuda_shared_2( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx + block_width - i ;
int b_index_y = block_width - bx -1;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
__syncthreads();
if (tx == 0)
temp[tx][0] = matrix_cuda[index_nw];
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
__syncthreads();
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
| ad971c91be06db21039dfb375166476405e2b070.cu |
#include "needle.h"
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
__device__ __host__ int
maximum( int a,
int b,
int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
__global__ void
needle_cuda_shared_1( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx;
int b_index_y = i - 1 - bx;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
int t0;
if (tx == 0)
t0 = matrix_cuda[index_nw];
int t1 = matrix_cuda[index_w + cols * tx];
int t2 = matrix_cuda[index_n];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
temp[tx + 1][0] = t1;
temp[0][tx + 1] = t2;
if (tx == 0) {
temp[0][0] = t0;
}
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
unsigned mask = __ballot_sync(0xffffffff, threadIdx.x <= m);
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
// be careful because of the blocksize
__syncwarp(mask);
}
// for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
// unsigned mask = __ballot_sync(0xffffffff, threadIdx.x <= m);
// if ( tx <= m){
// int t_index_x = tx + BLOCK_SIZE - m ;
// int t_index_y = BLOCK_SIZE - tx;
// // int tmp = temp[t_index_y][t_index_x-1];
// int tmp = temp[t_index_y-1][t_index_x];
// // __syncwarp(mask);
// int tmp2 = __shfl_up_sync(mask, tmp, 1);
// // __syncwarp(mask);
// temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
// tmp2 - penalty,
// tmp- penalty);
// }
// __syncthreads();
// }
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
unsigned mask = __ballot_sync(0xffffffff, threadIdx.x <= m);
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
// __syncthreads();
__syncwarp(mask);
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
__global__ void
needle_cuda_shared_2( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx + block_width - i ;
int b_index_y = block_width - bx -1;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
__syncthreads();
if (tx == 0)
temp[tx][0] = matrix_cuda[index_nw];
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
__syncthreads();
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
|
f8a4e735fe5de7b5429a6e80fa77874a664a4ab3.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Auto-generated explicit template instantiation of one int8 implicit-GEMM
// convolution variant (bias + ReLU + clamp epilogue).  Do not edit by hand;
// regenerate with the script named above.
// NCHW32 interleaved tensor layouts for source/destination, CRSK32 for filter.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
// Tile shapes (M, N, K): threadblock tile, per-warp tile, tensor-core MMA shape.
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
// Epilogue: out = clamp(relu(alpha * acc + bias [+ z])) quantized back to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
    int8_t, 8, int32_t, int32_t, float>;
// Tensor-op (SM75) forward-convolution kernel, 2-stage pipeline,
// 16-element alignment on src and filter, saturating int8 multiply-add.
using Convolution = cutlass::conv::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
    2, 16, 16, false,
    cutlass::arch::OpMultiplyAddSaturate>;
// Force emission of the wrapper for this exact Convolution configuration.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| f8a4e735fe5de7b5429a6e80fa77874a664a4ab3.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Auto-generated explicit template instantiation of one int8 implicit-GEMM
// convolution variant (bias + ReLU + clamp epilogue).  Do not edit by hand;
// regenerate with the script named above.
// NCHW32 interleaved tensor layouts for source/destination, CRSK32 for filter.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
// Tile shapes (M, N, K): threadblock tile, per-warp tile, tensor-core MMA shape.
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
// Epilogue: out = clamp(relu(alpha * acc + bias [+ z])) quantized back to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
    int8_t, 8, int32_t, int32_t, float>;
// Tensor-op (SM75) forward-convolution kernel, 2-stage pipeline,
// 16-element alignment on src and filter, saturating int8 multiply-add.
using Convolution = cutlass::conv::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
    2, 16, 16, false,
    cutlass::arch::OpMultiplyAddSaturate>;
// Force emission of the wrapper for this exact Convolution configuration.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
0e24bffe6c8eb631e73a22a3ae764fea705321c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdlib.h>
#include<math.h>
#include<iostream>
#include<time.h>
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
using namespace std;
__global__ void dirihle (float* u, float* f, float* v, int N, float h2, int rb, float* eps)
{
// One red-black relaxation half-sweep for the Dirichlet problem on an N x N
// grid: threads whose parity (i + j) % 2 matches rb update u[k] from the
// 4-point stencil with right-hand side f and step h2 = h*h.  v holds the
// reference solution and *eps is used as a max |v - u| error estimate.
__shared__ float s_u[BLOCK_DIM_X + 2][BLOCK_DIM_Y + 2];
// Global grid coordinates of this thread's cell.
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Position inside the shared tile, shifted by one for the halo ring.
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
int k = j*N + i;
if ( i < N && j < N )
{
// Stage this cell and, on tile edges, the neighbouring halo cells.
s_u[ty][tx] = u[k];
if ( ty == 1 && j > 0 )
s_u[ty-1][tx] = u[k-N];
// NOTE(review): BLOCK_DIM_X is compared against ty (the y index) here and
// BLOCK_DIM_Y against tx below -- swapped, but harmless while both are 16.
if ( ty == BLOCK_DIM_X && j < N-1 )
s_u[ty+1][tx] = u[k+N];
if ( tx == 1 && i > 0 )
s_u[ty][tx-1] = u[k-1];
if ( tx == BLOCK_DIM_Y && i < N-1 )
s_u[ty][tx+1] = u[k+1];
// Wrap-around reads for the leftmost/rightmost columns -- presumably a
// periodic-style halo fill; TODO confirm the intended boundary treatment.
if ( i == 0 )
s_u[ty][tx-1] = u[k+N-2];
if( i == N-1 )
s_u[ty][tx+1] = u[k-N+2];
}
__syncthreads();
// NOTE(review): every thread writes eps[0] = 0 and the max-update below is
// neither atomic nor reduced, so *eps is a racy, best-effort error estimate
// (the host additionally caps the loop by iteration count).
eps[0] = 0;
if ( (i > 0 ) && ( i < N-1 ) && ( j > 0 ) && ( j < N-1 ) && ( k < N*N ) && ( i + j )%2 == rb ) {
// Stencil update from the four staged neighbours.
u[k] = 0.25*(s_u[ty-1][tx] + s_u[ty+1][tx] + s_u[ty][tx-1] + s_u[ty][tx+1] - h2*f[k]);
}
if ( eps[0] < abs(v[k] - u[k] )){
eps[0] = abs(v[k] - u[k]);
}
}
int main( int argc, char * argv [] )
{
    // 256 x 256 mesh on the unit square; h is the grid step.
    int rows = 256;
    int count = 1;
    float* eps1;
    float* eps;
    float h = (float)1/(rows-1);
    int numBytes = sizeof(float)*rows*rows;

    // Host buffers: u = iterate, f = right-hand side, v = analytic solution.
    float* u = ( float* )malloc( numBytes );
    float* f = ( float* )malloc( numBytes );
    float* v = ( float* )malloc( numBytes );
    eps  = ((float*)malloc(sizeof(float)));
    eps1 = ((float*)malloc(sizeof(float)));
    eps[0] = 0;

    // Fill f, the exact solution v, and zero the initial iterate.
    for ( int i = 0; i < rows; i++ )
        for ( int j = 0; j < rows; j++ ) {
            float x = i*h;
            float y = j*h;
            f[i*rows + j] = 4 + 2*x*x - 2*x + 2*y*y - 2*y;
            u[i*rows + j] = 0;
            v[i*rows + j] = (x*x - x + 1)*(y*y - y + 1);
        }

    // Dirichlet boundary values on all four edges of the square.
    for ( int i = 0; i < rows; i++ ) {
        float x = i*h;
        u[i*rows] = x*x - x + 1;
        u[i] = x*x - x + 1;
        u[i*rows+(rows-1)] = x*x - x + 1;
        u[(rows-1)*rows+i] = x*x - x + 1;
    }

    // allocate device memory
    float * devU = NULL;
    float * devV = NULL;
    float * devF = NULL;
    float * devE = NULL;
    hipMalloc ( (void**)&devU, numBytes );
    hipMalloc ( (void**)&devV, numBytes );
    hipMalloc ( (void**)&devF, numBytes );
    hipMalloc ((void**)&devE, sizeof(float));

    // Launch configuration: 16x16 blocks of 16x16 threads covers 256x256.
    dim3 grid = dim3(16, 16);
    dim3 blocks = dim3(16, 16);

    hipMemcpy ( devU, u, numBytes, hipMemcpyHostToDevice);
    hipMemcpy ( devV, v, numBytes, hipMemcpyHostToDevice);
    hipMemcpy ( devF, f, numBytes, hipMemcpyHostToDevice);
    hipMemcpy ( devE, eps, sizeof(float), hipMemcpyHostToDevice);

    clock_t t1, t2;
    t1 = clock();
    // Alternate red (rb = 0) and black (rb = 1) half-sweeps; the blocking
    // hipMemcpy after each launch also synchronizes with the kernel.
    do{
        hipLaunchKernelGGL(( dirihle), dim3(grid), dim3(blocks), 0, 0, devU, devF, devV, rows, h*h, 0, devE);
        hipMemcpy(eps1, devE, sizeof(float), hipMemcpyDeviceToHost);
        hipLaunchKernelGGL(( dirihle), dim3(grid), dim3(blocks), 0, 0, devU, devF, devV, rows, h*h, 1, devE);
        hipMemcpy(eps, devE, sizeof(float), hipMemcpyDeviceToHost);
        cerr<<count<<" "<<eps[0]<<" "<<eps1[0]<<endl;
        count++;
    }
    // Runs at least 35300 iterations, then continues until both error
    // estimates have dropped to 0.005 or below.
    while (count < 35300 || eps[0] > 0.005 || eps1[0] > 0.005);
    hipMemcpy(u,devU,numBytes,hipMemcpyDeviceToHost);
    t2 = clock();
    cerr<<" "<<((float)(t2 - t1))/(CLOCKS_PER_SEC)<<" sec" <<endl;

    // Dump the converged solution as "x y u(x,y)" triples.
    for (int i = 0; i < rows; i++ )
        for ( int j = 0; j < rows; j++ ) {
            cout << i*h << " " << j*h << " " << u[i*rows+j] <<endl;
        }

    // Fix: these buffers come from malloc(), so they must be released with
    // free() -- the original used delete[], which is undefined behaviour.
    free(u);
    free(f);
    free(v);
    free(eps);
    free(eps1);  // fix: eps1 was previously leaked

    hipFree ( devU );
    hipFree ( devV );
    hipFree ( devF );
    hipFree ( devE );
    return 0;
}
| 0e24bffe6c8eb631e73a22a3ae764fea705321c9.cu | #include<stdlib.h>
#include<math.h>
#include<iostream>
#include<time.h>
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
using namespace std;
__global__ void dirihle (float* u, float* f, float* v, int N, float h2, int rb, float* eps)
{
// One red-black relaxation half-sweep for the Dirichlet problem on an N x N
// grid: threads whose parity (i + j) % 2 matches rb update u[k] from the
// 4-point stencil with right-hand side f and step h2 = h*h.  v holds the
// reference solution and *eps is used as a max |v - u| error estimate.
__shared__ float s_u[BLOCK_DIM_X + 2][BLOCK_DIM_Y + 2];
// Global grid coordinates of this thread's cell.
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Position inside the shared tile, shifted by one for the halo ring.
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
int k = j*N + i;
if ( i < N && j < N )
{
// Stage this cell and, on tile edges, the neighbouring halo cells.
s_u[ty][tx] = u[k];
if ( ty == 1 && j > 0 )
s_u[ty-1][tx] = u[k-N];
// NOTE(review): BLOCK_DIM_X is compared against ty (the y index) here and
// BLOCK_DIM_Y against tx below -- swapped, but harmless while both are 16.
if ( ty == BLOCK_DIM_X && j < N-1 )
s_u[ty+1][tx] = u[k+N];
if ( tx == 1 && i > 0 )
s_u[ty][tx-1] = u[k-1];
if ( tx == BLOCK_DIM_Y && i < N-1 )
s_u[ty][tx+1] = u[k+1];
// Wrap-around reads for the leftmost/rightmost columns -- presumably a
// periodic-style halo fill; TODO confirm the intended boundary treatment.
if ( i == 0 )
s_u[ty][tx-1] = u[k+N-2];
if( i == N-1 )
s_u[ty][tx+1] = u[k-N+2];
}
__syncthreads();
// NOTE(review): every thread writes eps[0] = 0 and the max-update below is
// neither atomic nor reduced, so *eps is a racy, best-effort error estimate
// (the host additionally caps the loop by iteration count).
eps[0] = 0;
if ( (i > 0 ) && ( i < N-1 ) && ( j > 0 ) && ( j < N-1 ) && ( k < N*N ) && ( i + j )%2 == rb ) {
// Stencil update from the four staged neighbours.
u[k] = 0.25*(s_u[ty-1][tx] + s_u[ty+1][tx] + s_u[ty][tx-1] + s_u[ty][tx+1] - h2*f[k]);
}
if ( eps[0] < abs(v[k] - u[k] )){
eps[0] = abs(v[k] - u[k]);
}
}
int main( int argc, char * argv [] )
{
    // 256 x 256 mesh on the unit square; h is the grid step.
    int rows = 256;
    int count = 1;
    float* eps1;
    float* eps;
    float h = (float)1/(rows-1);
    int numBytes = sizeof(float)*rows*rows;

    // Host buffers: u = iterate, f = right-hand side, v = analytic solution.
    float* u = ( float* )malloc( numBytes );
    float* f = ( float* )malloc( numBytes );
    float* v = ( float* )malloc( numBytes );
    eps  = ((float*)malloc(sizeof(float)));
    eps1 = ((float*)malloc(sizeof(float)));
    eps[0] = 0;

    // Fill f, the exact solution v, and zero the initial iterate.
    for ( int i = 0; i < rows; i++ )
        for ( int j = 0; j < rows; j++ ) {
            float x = i*h;
            float y = j*h;
            f[i*rows + j] = 4 + 2*x*x - 2*x + 2*y*y - 2*y;
            u[i*rows + j] = 0;
            v[i*rows + j] = (x*x - x + 1)*(y*y - y + 1);
        }

    // Dirichlet boundary values on all four edges of the square.
    for ( int i = 0; i < rows; i++ ) {
        float x = i*h;
        u[i*rows] = x*x - x + 1;
        u[i] = x*x - x + 1;
        u[i*rows+(rows-1)] = x*x - x + 1;
        u[(rows-1)*rows+i] = x*x - x + 1;
    }

    // allocate device memory
    float * devU = NULL;
    float * devV = NULL;
    float * devF = NULL;
    float * devE = NULL;
    cudaMalloc ( (void**)&devU, numBytes );
    cudaMalloc ( (void**)&devV, numBytes );
    cudaMalloc ( (void**)&devF, numBytes );
    cudaMalloc ((void**)&devE, sizeof(float));

    // Launch configuration: 16x16 blocks of 16x16 threads covers 256x256.
    dim3 grid = dim3(16, 16);
    dim3 blocks = dim3(16, 16);

    cudaMemcpy ( devU, u, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy ( devV, v, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy ( devF, f, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy ( devE, eps, sizeof(float), cudaMemcpyHostToDevice);

    clock_t t1, t2;
    t1 = clock();
    // Alternate red (rb = 0) and black (rb = 1) half-sweeps; the blocking
    // cudaMemcpy after each launch also synchronizes with the kernel.
    do{
        dirihle<<<grid, blocks>>>(devU, devF, devV, rows, h*h, 0, devE);
        cudaMemcpy(eps1, devE, sizeof(float), cudaMemcpyDeviceToHost);
        dirihle<<<grid, blocks>>>(devU, devF, devV, rows, h*h, 1, devE);
        cudaMemcpy(eps, devE, sizeof(float), cudaMemcpyDeviceToHost);
        cerr<<count<<" "<<eps[0]<<" "<<eps1[0]<<endl;
        count++;
    }
    // Runs at least 35300 iterations, then continues until both error
    // estimates have dropped to 0.005 or below.
    while (count < 35300 || eps[0] > 0.005 || eps1[0] > 0.005);
    cudaMemcpy(u,devU,numBytes,cudaMemcpyDeviceToHost);
    t2 = clock();
    cerr<<" "<<((float)(t2 - t1))/(CLOCKS_PER_SEC)<<" sec" <<endl;

    // Dump the converged solution as "x y u(x,y)" triples.
    for (int i = 0; i < rows; i++ )
        for ( int j = 0; j < rows; j++ ) {
            cout << i*h << " " << j*h << " " << u[i*rows+j] <<endl;
        }

    // Fix: these buffers come from malloc(), so they must be released with
    // free() -- the original used delete[], which is undefined behaviour.
    free(u);
    free(f);
    free(v);
    free(eps);
    free(eps1);  // fix: eps1 was previously leaked

    cudaFree ( devU );
    cudaFree ( devV );
    cudaFree ( devF );
    cudaFree ( devE );
    return 0;
}
|
253ef31cd58be0b0687ffbc06d3ee45462917c35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
// Device kernel: every launched thread prints one greeting line via device printf.
__global__ void dkernel() {
printf("Hello World from GPU!\n");
}
int main() {
// Launch a single block of 332 threads; each prints one line.
hipLaunchKernelGGL(( dkernel), dim3(1),dim3(332), 0, 0, );
// Block until the kernel finishes so the device printf buffer is flushed
// before the process exits.
hipDeviceSynchronize();
return 0;
} | 253ef31cd58be0b0687ffbc06d3ee45462917c35.cu | #include <iostream>
using namespace std;
// Device kernel: every launched thread prints one greeting line via device printf.
__global__ void dkernel() {
printf("Hello World from GPU!\n");
}
int main() {
// Launch a single block of 332 threads; each prints one line.
dkernel<<<1,332>>>();
// Block until the kernel finishes so the device printf buffer is flushed
// before the process exits.
cudaDeviceSynchronize();
return 0;
} |
b6a9d53efe272f3bec70019cad89928916646b86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
/*
 * A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from 6 CUDA threads (2 blocks of 3 threads each) running
 * on the GPU.
 */
// Device kernel: each launched thread prints one greeting line via device printf.
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
    // Launch 2 blocks of 3 threads: six greeting lines from the device.
    hipLaunchKernelGGL(( helloFromGPU), dim3(2), dim3(3), 0, 0, );
    // Kernel launches return no status; query it explicitly so a bad launch
    // configuration is not silently swallowed.
    CHECK(hipGetLastError());
    // hipDeviceReset() synchronizes with the device, which also flushes the
    // device-side printf buffer before the host message below.
    CHECK(hipDeviceReset());
    printf("----> CPU CPU !!!! <-----\n");
    return 0;
}
| b6a9d53efe272f3bec70019cad89928916646b86.cu | #include "../common/common.h"
#include <stdio.h>
/*
 * A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from 6 CUDA threads (2 blocks of 3 threads each) running
 * on the GPU.
 */
// Device kernel: each launched thread prints one greeting line via device printf.
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
    // Launch 2 blocks of 3 threads: six greeting lines from the device.
    helloFromGPU<<<2, 3>>>();
    // Kernel launches return no status; query it explicitly so a bad launch
    // configuration is not silently swallowed.
    CHECK(cudaGetLastError());
    // cudaDeviceReset() synchronizes with the device, which also flushes the
    // device-side printf buffer before the host message below.
    CHECK(cudaDeviceReset());
    printf("----> CPU CPU !!!! <-----\n");
    return 0;
}
|
111c492c41d182eb639162963832d1e507feec2c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 43532.0f
#define BETA 12313.0f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* CPU reference: y = ALPHA * (A*x) + BETA * (B*x); tmp receives A*x. */
void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp)
{
	int i, j;

	for (i = 0; i < N; i++)
	{
		/* Accumulate both matrix-vector rows in locals, then combine. */
		DATA_TYPE acc_a = 0;
		DATA_TYPE acc_b = 0;
		for (j = 0; j < N; j++)
		{
			acc_a = A[i*N + j] * x[j] + acc_a;
			acc_b = B[i*N + j] * x[j] + acc_b;
		}
		tmp[i] = acc_a;
		y[i] = ALPHA * acc_a + BETA * acc_b;
	}
}
/* Deterministic operand initialisation: x[i] = i/N, A[i][j] = (i*j)/N. */
void init(DATA_TYPE* A, DATA_TYPE* x)
{
	/* Vector entries. */
	for (int i = 0; i < N; i++)
		x[i] = ((DATA_TYPE) i) / N;

	/* Matrix entries (row-major). */
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
			A[i*N + j] = ((DATA_TYPE) i*j) / N;
}
/* Count and report elements whose CPU/GPU percent difference exceeds the
 * configured threshold. */
void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu)
{
	int fail = 0;
	for (int i = 0; i < N; i++)
	{
		if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
			fail++;
	}

	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Select the benchmark GPU (GPU_DEVICE) and print its name.
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
/* One thread per output row: y[i] = ALPHA * (A*x)[i] + BETA * (B*x)[i].
 * Fix: accumulate in registers instead of read-modify-write on global
 * memory -- the original "tmp[i] += ..." / "y[i] += ..." summed on top of
 * whatever the caller left in tmp/y (the host never zeroes those buffers
 * before copying them in), producing garbage results.  tmp[i] is still
 * written at the end to preserve the output contract. */
__global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;

	if (i < N)
	{
		DATA_TYPE t = 0;   /* running (A*x)[i] */
		DATA_TYPE yv = 0;  /* running (B*x)[i] */
		int j;
		for(j = 0; j < N; j++)
		{
			t += a[i * N + j] * x[j];
			yv += b[i * N + j] * x[j];
		}
		tmp[i] = t;
		y[i] = ALPHA * t + BETA * yv;
	}
}
/* Runs gesummv on the GPU: copies operands in, times the kernel, copies the
 * result vector into y_outputFromGpu and prints the GPU runtime. */
void gesummvCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
	double t_start, t_end;

	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	DATA_TYPE *x_gpu;
	DATA_TYPE *y_gpu;
	DATA_TYPE *tmp_gpu;

	hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N);
	hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N);
	hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N);
	hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N);
	hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N);
	hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice);
	hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice);
	hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);
	hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);
	hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice);

	/* One thread per output row. */
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1);

	t_start = rtclock();
	hipLaunchKernelGGL(( gesummv_kernel), dim3(grid), dim3(block), 0, 0, A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu);
	hipDeviceSynchronize();
	t_end = rtclock();

	hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost);
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

	/* Fix: the device buffers were never released (memory leak). */
	hipFree(A_gpu);
	hipFree(B_gpu);
	hipFree(x_gpu);
	hipFree(y_gpu);
	hipFree(tmp_gpu);
}
int main(int argc, char *argv[])
{
	double t_start, t_end;

	/* Allocate host buffers: two NxN matrices plus N-length vectors. */
	DATA_TYPE* A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
	DATA_TYPE* B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
	DATA_TYPE* x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	DATA_TYPE* y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	DATA_TYPE* y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	DATA_TYPE* tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));

	init(A, x);

	/* GPU pass first (prints its own runtime), then the timed CPU reference. */
	GPU_argv_init();
	gesummvCuda(A, B, x, y, tmp, y_outputFromGpu);

	t_start = rtclock();
	gesummv(A, B, x, y, tmp);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

	/* Verify the GPU result against the CPU reference. */
	compareResults(y, y_outputFromGpu);

	free(A);
	free(B);
	free(x);
	free(y);
	free(y_outputFromGpu);
	free(tmp);

	return 0;
}
| 111c492c41d182eb639162963832d1e507feec2c.cu | /**
* gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 43532.0f
#define BETA 12313.0f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* CPU reference: y = ALPHA * (A*x) + BETA * (B*x); tmp receives A*x. */
void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp)
{
	int i, j;

	for (i = 0; i < N; i++)
	{
		/* Accumulate both matrix-vector rows in locals, then combine. */
		DATA_TYPE acc_a = 0;
		DATA_TYPE acc_b = 0;
		for (j = 0; j < N; j++)
		{
			acc_a = A[i*N + j] * x[j] + acc_a;
			acc_b = B[i*N + j] * x[j] + acc_b;
		}
		tmp[i] = acc_a;
		y[i] = ALPHA * acc_a + BETA * acc_b;
	}
}
/* Deterministic operand initialisation: x[i] = i/N, A[i][j] = (i*j)/N. */
void init(DATA_TYPE* A, DATA_TYPE* x)
{
	/* Vector entries. */
	for (int i = 0; i < N; i++)
		x[i] = ((DATA_TYPE) i) / N;

	/* Matrix entries (row-major). */
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
			A[i*N + j] = ((DATA_TYPE) i*j) / N;
}
/* Count and report elements whose CPU/GPU percent difference exceeds the
 * configured threshold. */
void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu)
{
	int fail = 0;
	for (int i = 0; i < N; i++)
	{
		if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
			fail++;
	}

	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Select the benchmark GPU (GPU_DEVICE) and print its name.
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
/* One thread per output row: y[i] = ALPHA * (A*x)[i] + BETA * (B*x)[i].
 * Fix: accumulate in registers instead of read-modify-write on global
 * memory -- the original "tmp[i] += ..." / "y[i] += ..." summed on top of
 * whatever the caller left in tmp/y (the host never zeroes those buffers
 * before copying them in), producing garbage results.  tmp[i] is still
 * written at the end to preserve the output contract. */
__global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;

	if (i < N)
	{
		DATA_TYPE t = 0;   /* running (A*x)[i] */
		DATA_TYPE yv = 0;  /* running (B*x)[i] */
		int j;
		for(j = 0; j < N; j++)
		{
			t += a[i * N + j] * x[j];
			yv += b[i * N + j] * x[j];
		}
		tmp[i] = t;
		y[i] = ALPHA * t + BETA * yv;
	}
}
/* Runs gesummv on the GPU: copies operands in, times the kernel, copies the
 * result vector into y_outputFromGpu and prints the GPU runtime. */
void gesummvCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
	double t_start, t_end;

	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	DATA_TYPE *x_gpu;
	DATA_TYPE *y_gpu;
	DATA_TYPE *tmp_gpu;

	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N);
	cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N);
	cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N);
	cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N);
	cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N);
	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
	cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
	cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);

	/* One thread per output row. */
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1);

	t_start = rtclock();
	gesummv_kernel<<< grid, block>>>(A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu);
	/* Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize(). */
	cudaDeviceSynchronize();
	t_end = rtclock();

	cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

	/* Fix: the device buffers were never released (memory leak). */
	cudaFree(A_gpu);
	cudaFree(B_gpu);
	cudaFree(x_gpu);
	cudaFree(y_gpu);
	cudaFree(tmp_gpu);
}
int main(int argc, char *argv[])
{
	double t_start, t_end;

	/* Allocate host buffers: two NxN matrices plus N-length vectors. */
	DATA_TYPE* A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
	DATA_TYPE* B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
	DATA_TYPE* x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	DATA_TYPE* y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	DATA_TYPE* y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	DATA_TYPE* tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));

	init(A, x);

	/* GPU pass first (prints its own runtime), then the timed CPU reference. */
	GPU_argv_init();
	gesummvCuda(A, B, x, y, tmp, y_outputFromGpu);

	t_start = rtclock();
	gesummv(A, B, x, y, tmp);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

	/* Verify the GPU result against the CPU reference. */
	compareResults(y, y_outputFromGpu);

	free(A);
	free(B);
	free(x);
	free(y);
	free(y_outputFromGpu);
	free(tmp);

	return 0;
}
|
943eb02686a2ff6259eebffddd3ebbe975989564.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common/book.h"
#include <iostream>
#include <iomanip>
using namespace std;
#define imin(a, b) (a<b?a:b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1) / threadsPerBlock);
// Per-block partial dot product: each thread accumulates a grid-stride slice
// of a[i]*b[i], the block reduces the partials in shared memory, and thread 0
// writes one partial sum per block into c[blockIdx.x] (the host finishes the
// reduction over blocks).
__global__ void dot(double* a, double* b, double* c) {
__shared__ double cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
// Grid-stride accumulation so any grid size covers all N elements.
double temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
// Barrier sits outside the if, so every thread reaches it each round.
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
int main(void) {
    // Host buffers (final reduction over block partials happens on the CPU).
    double *a = new double [N];
    double *b = new double [N];
    double *partial_c = new double [blocksPerGrid];

    // Device buffers.
    double *dev_a, *dev_b, *dev_partial_c;
    hipMalloc(&dev_a, N*sizeof(double));
    hipMalloc(&dev_b, N*sizeof(double));
    hipMalloc(&dev_partial_c, blocksPerGrid*sizeof(double));

    // a[i] = i and b[i] = 2i, so dot(a, b) = 2 * sum of squares.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }

    // Copy inputs to the device and launch the per-block partial reduction.
    hipMemcpy(dev_a, a, N*sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N*sizeof(double), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( dot), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c);

    // Blocking copy of the per-block partial sums back to the host.
    hipMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost);

    // Finish the reduction on the CPU and compare with the closed form.
    double c = 0;
    for (int i = 0; i < blocksPerGrid; i++)
        c += partial_c[i];

#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    cout << "Does GPU value " << setprecision(6) << c << " = " << setprecision(6) << 2*sum_squares((double)(N-1)) << " ?" << endl;

    // Release device then host memory.
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_partial_c);

    delete [] a;
    delete [] b;
    delete [] partial_c;
}
| 943eb02686a2ff6259eebffddd3ebbe975989564.cu | #include "./common/book.h"
#include <iostream>
#include <iomanip>
using namespace std;
#define imin(a, b) (a<b?a:b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1) / threadsPerBlock);
// Per-block partial dot product: each thread accumulates a grid-stride slice
// of a[i]*b[i], the block reduces the partials in shared memory, and thread 0
// writes one partial sum per block into c[blockIdx.x] (the host finishes the
// reduction over blocks).
__global__ void dot(double* a, double* b, double* c) {
__shared__ double cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
// Grid-stride accumulation so any grid size covers all N elements.
double temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
// Barrier sits outside the if, so every thread reaches it each round.
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
int main(void) {
    // Host buffers (final reduction over block partials happens on the CPU).
    double *a = new double [N];
    double *b = new double [N];
    double *partial_c = new double [blocksPerGrid];

    // Device buffers.
    double *dev_a, *dev_b, *dev_partial_c;
    cudaMalloc(&dev_a, N*sizeof(double));
    cudaMalloc(&dev_b, N*sizeof(double));
    cudaMalloc(&dev_partial_c, blocksPerGrid*sizeof(double));

    // a[i] = i and b[i] = 2i, so dot(a, b) = 2 * sum of squares.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }

    // Copy inputs to the device and launch the per-block partial reduction.
    cudaMemcpy(dev_a, a, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(double), cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);

    // Blocking copy of the per-block partial sums back to the host.
    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost);

    // Finish the reduction on the CPU and compare with the closed form.
    double c = 0;
    for (int i = 0; i < blocksPerGrid; i++)
        c += partial_c[i];

#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    cout << "Does GPU value " << setprecision(6) << c << " = " << setprecision(6) << 2*sum_squares((double)(N-1)) << " ?" << endl;

    // Release device then host memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);

    delete [] a;
    delete [] b;
    delete [] partial_c;
}
|
beb075194bfc0c1bc7954ffff676366fdc71f2fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/mean_iou_op.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Histograms per-class prediction outcomes: for every element, a match
// increments correct[class]; a mismatch increments wrong[] for both the
// predicted and the true class.  Tally goes through a dynamic shared-memory
// scratch histogram first, then is flushed to the global counters.
// Requires at least 2 * num_classes ints of dynamic shared memory.
template <typename T>
__global__ void CountCUDAKernel(const int num_classes,
                                const int count,
                                const T* predictions,
                                const T* labels,
                                int* wrong,
                                int* correct) {
  // Layout: [0, num_classes) -> wrong counts, [num_classes, 2N) -> correct.
  extern __shared__ int block_cache[];
  int* wrong_local = block_cache;
  int* correct_local = block_cache + num_classes;

  // Zero the shared histograms cooperatively.
  for (int idx = threadIdx.x; idx < num_classes * 2; idx += blockDim.x) {
    block_cache[idx] = 0;
  }
  __syncthreads();

  // Tally matches/mismatches into shared memory over a grid-stride range.
  CUDA_KERNEL_LOOP(idx, count) {
    const T pred = predictions[idx];
    const T label = labels[idx];
    if (pred == label) {
      atomicAdd(correct_local + pred, 1);
    } else {
      atomicAdd(wrong_local + pred, 1);
      atomicAdd(wrong_local + label, 1);
    }
  }
  __syncthreads();

  // Flush the per-block histograms into the global counters.
  for (int idx = threadIdx.x; idx < num_classes; idx += blockDim.x) {
    atomicAdd(wrong + idx, wrong_local[idx]);
    atomicAdd(correct + idx, correct_local[idx]);
  }
}
// Computes per-class IoU-style scores ious[i] = correct[i] / (wrong[i] +
// correct[i]) and adds their mean over the classes that actually appeared
// into iou[0].  Expects a single-block launch: valid_count_c is block-local
// shared memory and thread 0 alone performs the final summation.
__global__ void ComputeIoUCUDAKernel(
    const int num_classes, int* wrong, int* correct, float* ious, float* iou) {
  __shared__ int valid_count_c;
  if (threadIdx.x == 0) {
    valid_count_c = 0;
  }
  __syncthreads();
  CUDA_KERNEL_LOOP(i, num_classes) {
    int wrong_n = wrong[i];
    int correct_n = correct[i];
    int denominator = wrong_n + correct_n;
    if (denominator > 0) {
      // Class i was observed: count it toward the mean's denominator.
      atomicAdd(&valid_count_c, 1);
      ious[i] = static_cast<float>(correct_n) / denominator;
    } else {
      ious[i] = 0;
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    float iou_sum = 0;
    for (int i = 0; i < num_classes; ++i) {
      iou_sum += ious[i];
    }
    // NOTE(review): if no class had any sample, valid_count_c is 0 and this
    // division yields inf/nan.
    iou[0] += iou_sum / valid_count_c;
  }
}
// GPU kernel for the mean_iou op.  Pipeline: (1) zero the wrong/correct
// counters and the running mean, (2) seed them from the optional InMeanIou /
// InWrongs / InCorrects inputs, (3) histogram prediction outcomes on the
// device, (4) reduce to per-class IoU and accumulate the mean into OutMeanIou.
template <typename T>
class MeanIoUCUDAOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    auto& place = *dev_ctx.eigen_device();
    // get input and output tensor
    auto* predictions = ctx.Input<Tensor>("Predictions");
    auto* labels = ctx.Input<Tensor>("Labels");
    auto* out_mean_iou = ctx.Output<Tensor>("OutMeanIou");
    auto* out_wrong = ctx.Output<Tensor>("OutWrong");
    auto* out_correct = ctx.Output<Tensor>("OutCorrect");
    int num_classes = static_cast<int>(ctx.Attr<int>("num_classes"));

    // Get data ptr
    const T* predictions_data = predictions->data<T>();
    const T* labels_data = labels->data<T>();
    int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace());
    int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace());
    float* out_mean_iou_data =
        out_mean_iou->mutable_data<float>(ctx.GetPlace());

    // Get Eigen tensor
    auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou);
    auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong);
    auto out_correct_t = EigenTensor<int, 1>::From(*out_correct);

    // Temporary memory
    // Device scratch buffer for the per-class IoU values.
    auto tmp_ious_data = memory::Alloc(dev_ctx, num_classes * sizeof(float));
    float* ious_data = static_cast<float*>(tmp_ious_data->ptr());

    // Init out_wrong, out_correct and out_mean_iou
    out_wrong_t.device(place) = out_wrong_t.constant(0);
    out_correct_t.device(place) = out_correct_t.constant(0);
    out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f);

    // collect pre wrong, correct and mean_iou
    auto in_mean_ious = ctx.MultiInput<Tensor>("InMeanIou");
    for (int i = 0; i < in_mean_ious.size(); ++i) {
      out_mean_iou_t.device(place) +=
          EigenTensor<float, 1>::From(*in_mean_ious[i]);
    }
    auto in_wrongs = ctx.MultiInput<Tensor>("InWrongs");
    for (int i = 0; i < in_wrongs.size(); ++i) {
      out_wrong_t.device(place) += EigenTensor<int, 1>::From(*in_wrongs[i]);
    }
    auto in_corrects = ctx.MultiInput<Tensor>("InCorrects");
    for (int i = 0; i < in_corrects.size(); ++i) {
      out_correct_t.device(place) += EigenTensor<int, 1>::From(*in_corrects[i]);
    }
    // compute
    auto stream = ctx.cuda_device_context().stream();
    int block = PADDLE_CUDA_NUM_THREADS;
    int grid = (predictions->numel() + block - 1) / block;
    // Dynamic shared memory for the per-block wrong/correct histograms.
    int cache_size = (num_classes * 2 + 1) * sizeof(int);
    hipLaunchKernelGGL(( CountCUDAKernel<T>)
        , dim3(grid), dim3(block), cache_size, stream, num_classes,
                                                  predictions->numel(),
                                                  predictions_data,
                                                  labels_data,
                                                  out_wrong_data,
                                                  out_correct_data);
    // Single-block launch: the IoU reduction assumes one block (see kernel).
    hipLaunchKernelGGL(( ComputeIoUCUDAKernel), dim3(1), dim3(block), 0, stream, num_classes,
                                                   out_wrong_data,
                                                   out_correct_data,
                                                   ious_data,
                                                   out_mean_iou_data);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register integer element types for the mean_iou op.
// NOTE(review): int and int32_t are typically the same type, so the <int>
// and <int32_t> registrations look like redundant duplicates -- confirm.
REGISTER_OP_CUDA_KERNEL(mean_iou,
                        ops::MeanIoUCUDAOpKernel<int>,
                        ops::MeanIoUCUDAOpKernel<int64_t>,
                        ops::MeanIoUCUDAOpKernel<int32_t>);
| beb075194bfc0c1bc7954ffff676366fdc71f2fc.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/mean_iou_op.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Histograms per-class prediction outcomes: for every element, a match
// increments correct[class]; a mismatch increments wrong[] for both the
// predicted and the true class.  Tally goes through a dynamic shared-memory
// scratch histogram first, then is flushed to the global counters.
// Requires at least 2 * num_classes ints of dynamic shared memory.
template <typename T>
__global__ void CountCUDAKernel(const int num_classes,
                                const int count,
                                const T* predictions,
                                const T* labels,
                                int* wrong,
                                int* correct) {
  // Layout: [0, num_classes) -> wrong counts, [num_classes, 2N) -> correct.
  extern __shared__ int block_cache[];
  int* wrong_local = block_cache;
  int* correct_local = block_cache + num_classes;

  // Zero the shared histograms cooperatively.
  for (int idx = threadIdx.x; idx < num_classes * 2; idx += blockDim.x) {
    block_cache[idx] = 0;
  }
  __syncthreads();

  // Tally matches/mismatches into shared memory over a grid-stride range.
  CUDA_KERNEL_LOOP(idx, count) {
    const T pred = predictions[idx];
    const T label = labels[idx];
    if (pred == label) {
      atomicAdd(correct_local + pred, 1);
    } else {
      atomicAdd(wrong_local + pred, 1);
      atomicAdd(wrong_local + label, 1);
    }
  }
  __syncthreads();

  // Flush the per-block histograms into the global counters.
  for (int idx = threadIdx.x; idx < num_classes; idx += blockDim.x) {
    atomicAdd(wrong + idx, wrong_local[idx]);
    atomicAdd(correct + idx, correct_local[idx]);
  }
}
// Computes per-class IoU-style scores ious[i] = correct[i] / (wrong[i] +
// correct[i]) and adds their mean over the classes that actually appeared
// into iou[0].  Expects a single-block launch: valid_count_c is block-local
// shared memory and thread 0 alone performs the final summation.
__global__ void ComputeIoUCUDAKernel(
    const int num_classes, int* wrong, int* correct, float* ious, float* iou) {
  __shared__ int valid_count_c;
  if (threadIdx.x == 0) {
    valid_count_c = 0;
  }
  __syncthreads();
  CUDA_KERNEL_LOOP(i, num_classes) {
    int wrong_n = wrong[i];
    int correct_n = correct[i];
    int denominator = wrong_n + correct_n;
    if (denominator > 0) {
      // Class i was observed: count it toward the mean's denominator.
      atomicAdd(&valid_count_c, 1);
      ious[i] = static_cast<float>(correct_n) / denominator;
    } else {
      ious[i] = 0;
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    float iou_sum = 0;
    for (int i = 0; i < num_classes; ++i) {
      iou_sum += ious[i];
    }
    // NOTE(review): if no class had any sample, valid_count_c is 0 and this
    // division yields inf/nan.
    iou[0] += iou_sum / valid_count_c;
  }
}
// GPU kernel for the mean_iou op. Computes per-class wrong/correct counts
// and the mean intersection-over-union of Predictions vs Labels, optionally
// folding in previously computed partial results (InMeanIou / InWrongs /
// InCorrects) before this batch's contribution.
template <typename T>
class MeanIoUCUDAOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    auto& place = *dev_ctx.eigen_device();
    // get input and output tensor
    auto* predictions = ctx.Input<Tensor>("Predictions");
    auto* labels = ctx.Input<Tensor>("Labels");
    auto* out_mean_iou = ctx.Output<Tensor>("OutMeanIou");
    auto* out_wrong = ctx.Output<Tensor>("OutWrong");
    auto* out_correct = ctx.Output<Tensor>("OutCorrect");
    int num_classes = static_cast<int>(ctx.Attr<int>("num_classes"));
    // Get data ptr
    const T* predictions_data = predictions->data<T>();
    const T* labels_data = labels->data<T>();
    int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace());
    int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace());
    float* out_mean_iou_data =
        out_mean_iou->mutable_data<float>(ctx.GetPlace());
    // Get Eigen tensor views for element-wise init/accumulation.
    auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou);
    auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong);
    auto out_correct_t = EigenTensor<int, 1>::From(*out_correct);
    // Temporary memory: device scratch for the per-class IoU values.
    auto tmp_ious_data = memory::Alloc(dev_ctx, num_classes * sizeof(float));
    float* ious_data = static_cast<float*>(tmp_ious_data->ptr());
    // Init out_wrong, out_correct and out_mean_iou
    out_wrong_t.device(place) = out_wrong_t.constant(0);
    out_correct_t.device(place) = out_correct_t.constant(0);
    out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f);
    // collect pre wrong, correct and mean_iou: fold prior partial results
    // into the outputs before adding this batch.
    auto in_mean_ious = ctx.MultiInput<Tensor>("InMeanIou");
    for (int i = 0; i < in_mean_ious.size(); ++i) {
      out_mean_iou_t.device(place) +=
          EigenTensor<float, 1>::From(*in_mean_ious[i]);
    }
    auto in_wrongs = ctx.MultiInput<Tensor>("InWrongs");
    for (int i = 0; i < in_wrongs.size(); ++i) {
      out_wrong_t.device(place) += EigenTensor<int, 1>::From(*in_wrongs[i]);
    }
    auto in_corrects = ctx.MultiInput<Tensor>("InCorrects");
    for (int i = 0; i < in_corrects.size(); ++i) {
      out_correct_t.device(place) += EigenTensor<int, 1>::From(*in_corrects[i]);
    }
    // compute
    auto stream = ctx.cuda_device_context().stream();
    int block = PADDLE_CUDA_NUM_THREADS;
    int grid = (predictions->numel() + block - 1) / block;  // ceil-div
    // Dynamic shared memory for CountCUDAKernel; the kernel uses
    // 2 * num_classes ints (the extra +1 int is unused slack).
    int cache_size = (num_classes * 2 + 1) * sizeof(int);
    CountCUDAKernel<T>
        <<<grid, block, cache_size, stream>>>(num_classes,
                                              predictions->numel(),
                                              predictions_data,
                                              labels_data,
                                              out_wrong_data,
                                              out_correct_data);
    // Deliberate single-block launch: the kernel's final reduction is done
    // by thread 0 of one block.
    ComputeIoUCUDAKernel<<<1, block, 0, stream>>>(num_classes,
                                                  out_wrong_data,
                                                  out_correct_data,
                                                  ious_data,
                                                  out_mean_iou_data);
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register the CUDA kernels for the integer prediction/label dtypes.
// NOTE(review): `int` and `int32_t` are the same type on supported
// platforms, so the first and third registrations look redundant — confirm.
REGISTER_OP_CUDA_KERNEL(mean_iou,
                        ops::MeanIoUCUDAOpKernel<int>,
                        ops::MeanIoUCUDAOpKernel<int64_t>,
                        ops::MeanIoUCUDAOpKernel<int32_t>);
|
3428f68b5214973b0a9a9518589bd5a00f8c6324.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Fill vals[0..n) with independent draws from N(mu, sigma^2), one thread
// per element; threads whose global index falls past n do nothing.
// Handles 2-D grids and 3-D blocks.
__global__ void rnorm_all_in_one_kernel(float *vals, int n, float mu, float sigma)
{
    // Flatten the (grid, block) coordinates into one global thread index.
    const int block_id          = blockIdx.y * gridDim.x + blockIdx.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int lane              = threadIdx.z * (blockDim.x * blockDim.y)
                                + threadIdx.y * blockDim.x
                                + threadIdx.x;
    const int gid = block_id * threads_per_block + lane;

    if (gid >= n) return;  // surplus threads exit

    // Per-thread generator; seeds are spread by the global index so threads
    // draw decorrelated streams (same seeding as before: 9131 + 17 * index).
    hiprandState_t state;
    hiprand_init(9131 + gid * 17, 0, 0, &state);
    vals[gid] = mu + sigma * hiprand_normal(&state);
}
// Fill vals[0..n) with independent draws from N(mu, sigma^2), one thread
// per element; threads whose global index falls past n do nothing.
// Handles 2-D grids and 3-D blocks.
__global__ void rnorm_all_in_one_kernel(float *vals, int n, float mu, float sigma)
{
    // Flatten the (grid, block) coordinates into one global thread index.
    const int block_id          = blockIdx.y * gridDim.x + blockIdx.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int lane              = threadIdx.z * (blockDim.x * blockDim.y)
                                + threadIdx.y * blockDim.x
                                + threadIdx.x;
    const int gid = block_id * threads_per_block + lane;

    if (gid >= n) return;  // surplus threads exit

    // Per-thread generator; seeds are spread by the global index so threads
    // draw decorrelated streams (same seeding as before: 9131 + 17 * index).
    curandState state;
    curand_init(9131 + gid * 17, 0, 0, &state);
    vals[gid] = mu + sigma * curand_normal(&state);
}
e85bceb97891628f6a209f461305948939cf8fb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VariationModeling.cu"
#else
#include "../common.h"
// Forward pass of VariationModeling. Collapses the input tensor into a
// logical (zdim, ydim, xdim) view, then launches
// cunn_VariationModeling_updateOutput_kernel over a 2-D (xdim, ydim) grid
// with the (nRow x nCol) probability table `ptable` and `accumN` forwarded.
// `output` is resized to match `input`; the transform itself lives in the
// kernel (not visible here).
void THNN_(VariationModeling_updateOutput)(
    THCState *state,
    THCTensor *output,
    THCTensor *input,
    THCTensor *ptable,
    int accumN)
// THCTensor *) // ref is for debugging
{
  // THCUNN_assertSameGPU(state, 4, output, input, ptable, ref);
  THCUNN_assertSameGPU(state, 3, output, input, ptable);
  // get parameters: collapse the input shape to three logical dims.
  int ndims = THCTensor_(nDimension)(state,input);
  long zdim = 1;
  long ydim = 1;
  long xdim = 1;
  // From FC layer: (batch, features)
  if (ndims == 2) {
    zdim = 1;
    ydim = THCTensor_(size)(state, input, 0);
    xdim = THCTensor_(size)(state, input, 1);
  }
  else if (ndims == 3) {
    zdim = THCTensor_(size)(state, input, 0);
    ydim = THCTensor_(size)(state, input, 1);
    xdim = THCTensor_(size)(state, input, 2);
  }
  // From Convolution layer: (n, c, h, w) -> y = n, x = c*h*w
  else if (ndims == 4) {
    zdim = 1;
    ydim = THCTensor_(size)(state, input, 0);
    xdim = THCTensor_(size)(state, input, 1) *
           THCTensor_(size)(state, input, 2) *
           THCTensor_(size)(state, input, 3);
  }
  // 5-D: (n, c, d, h, w) -> z = n, y = c, x = d*h*w
  else if (ndims == 5) {
    printf("ndims == 5\n");
    zdim = THCTensor_(size)(state, input, 0);
    ydim = THCTensor_(size)(state, input, 1);
    xdim = THCTensor_(size)(state, input, 2) *
           THCTensor_(size)(state, input, 3) *
           THCTensor_(size)(state, input, 4);
    printf("xdim: %ld, ydim: %ld, zdim: %ld\n", xdim, ydim, zdim);
  }
  long nRow = THCTensor_(size)(state, ptable, 0);
  long nCol = THCTensor_(size)(state, ptable, 1);
  // resize output and make input continuous
  THCTensor_(resizeAs)(state, output, input);
  input = THCTensor_(newContiguous)(state, input);
  // check if BLOCK_SIZE is properly set
  // int check = BLOCK_SIZE;
  // printf("BLOCK_SIZE shoulbe be 32 and it is '%d'\n", check);
  // set dimension of block and grid: one thread per (x, y) element.
  dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
  dim3 grid((xdim + threads.x - 1)/threads.x, (ydim + threads.y - 1)/threads.y);
  // printf("start variation modeling kernel\n");
  // NOTE(review): the commented-out variant below allocated
  // nRow*nCol*sizeof(real) dynamic shared memory; the live launch passes 0 —
  // confirm the kernel no longer needs a shared copy of ptable.
  // cunn_VariationModeling_updateOutput_kernel<real><<<grid, threads, nRow*nCol*sizeof(real)>>>(
  hipLaunchKernelGGL(( cunn_VariationModeling_updateOutput_kernel<real>), dim3(grid), dim3(threads), 0, 0,
      THCTensor_(data)(state, output),
      THCTensor_(data)(state, input),
      xdim,
      ydim,
      zdim,
      THCTensor_(data)(state, ptable),
      nRow,
      nCol,
      accumN);
  // THCTensor_(data)(state, ref));
  // printf("end kernel\n");
  // error checking: surface any launch failure as a Torch error.
  hipError_t errcode = hipGetLastError();
  if (errcode != hipSuccess)
  {
    THError(hipGetErrorString(errcode));
  }
  // free input: release the contiguous copy (or the extra reference).
  THCTensor_(free)(state, input);
}
#endif
// #ifndef THC_GENERIC_FILE
// #define THC_GENERIC_FILE "generic/VariationModeling.cu"
// #else
// #include "../common.h"
// void THNN_(VariationModeling_updateOutput)(
// THCState *state,
// THCTensor *output,
// THCTensor *input,
// THCTensor *ptable,
// int accumN)
// // THCTensor *) // ref is for debugging
// {
// // THCUNN_assertSameGPU(state, 4, output, input, ptable, ref);
// THCUNN_assertSameGPU(state, 3, output, input, ptable);
// // get parameters
// int ndims = THCTensor_(nDimension)(state,input);
// long zdim = 1;
// long ydim = 1;
// long xdim = 1;
// if (ndims == 2) {
// zdim = 1;
// ydim = THCTensor_(size)(state, input, 0);
// xdim = THCTensor_(size)(state, input, 1);
// }
// if (ndims == 3) {
// zdim = THCTensor_(size)(state, input, 0);
// ydim = THCTensor_(size)(state, input, 1);
// xdim = THCTensor_(size)(state, input, 2);
// }
// long nRow = THCTensor_(size)(state, ptable, 0);
// long nCol = THCTensor_(size)(state, ptable, 1);
// // for debugging, print ptable
// // real *temp = THCTensor_(data)(state, ptable);
// // for(int i=0; i<nRow; i++) {
// // for(int j=0; j<nCol; j++) {
// // printf("%.1f ", ScalarConvert<real, float>::to(temp[i*nCol+j]));
// // }
// // printf("\n");
// // }
// // resize output and make input continuous
// THCTensor_(resizeAs)(state, output, input);
// input = THCTensor_(newContiguous)(state, input);
// // check if BLOCK_SIZE is properly set
// // int check = BLOCK_SIZE;
// // printf("BLOCK_SIZE shoulbe be 32 and it is '%d'\n", check);
// // set dimension of block and grid
// dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
// dim3 grid((xdim + threads.x - 1)/threads.x, (ydim + threads.y - 1)/threads.y);
// // cunn_VariationModeling_updateOutput_kernel<real><<<grid, threads, nRow*nCol*sizeof(real)>>>(
// hipLaunchKernelGGL(( cunn_VariationModeling_updateOutput_kernel<real>), dim3(grid), dim3(threads), 0, 0,
// THCTensor_(data)(state, output),
// THCTensor_(data)(state, input),
// xdim,
// ydim,
// zdim,
// THCTensor_(data)(state, ptable),
// nRow,
// nCol,
// accumN);
// // THCTensor_(data)(state, ref));
// // error checking
// hipError_t errcode = hipGetLastError();
// if (errcode != hipSuccess)
// {
// THError(hipGetErrorString(errcode));
// }
// // free input
// THCTensor_(free)(state, input);
// }
// #endif
| e85bceb97891628f6a209f461305948939cf8fb4.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VariationModeling.cu"
#else
#include "../common.h"
// Forward pass of VariationModeling. Collapses the input tensor into a
// logical (zdim, ydim, xdim) view, then launches
// cunn_VariationModeling_updateOutput_kernel over a 2-D (xdim, ydim) grid
// with the (nRow x nCol) probability table `ptable` and `accumN` forwarded.
// `output` is resized to match `input`; the transform itself lives in the
// kernel (not visible here).
void THNN_(VariationModeling_updateOutput)(
    THCState *state,
    THCTensor *output,
    THCTensor *input,
    THCTensor *ptable,
    int accumN)
// THCTensor *) // ref is for debugging
{
  // THCUNN_assertSameGPU(state, 4, output, input, ptable, ref);
  THCUNN_assertSameGPU(state, 3, output, input, ptable);
  // get parameters: collapse the input shape to three logical dims.
  int ndims = THCTensor_(nDimension)(state,input);
  long zdim = 1;
  long ydim = 1;
  long xdim = 1;
  // From FC layer: (batch, features)
  if (ndims == 2) {
    zdim = 1;
    ydim = THCTensor_(size)(state, input, 0);
    xdim = THCTensor_(size)(state, input, 1);
  }
  else if (ndims == 3) {
    zdim = THCTensor_(size)(state, input, 0);
    ydim = THCTensor_(size)(state, input, 1);
    xdim = THCTensor_(size)(state, input, 2);
  }
  // From Convolution layer: (n, c, h, w) -> y = n, x = c*h*w
  else if (ndims == 4) {
    zdim = 1;
    ydim = THCTensor_(size)(state, input, 0);
    xdim = THCTensor_(size)(state, input, 1) *
           THCTensor_(size)(state, input, 2) *
           THCTensor_(size)(state, input, 3);
  }
  // 5-D: (n, c, d, h, w) -> z = n, y = c, x = d*h*w
  else if (ndims == 5) {
    printf("ndims == 5\n");
    zdim = THCTensor_(size)(state, input, 0);
    ydim = THCTensor_(size)(state, input, 1);
    xdim = THCTensor_(size)(state, input, 2) *
           THCTensor_(size)(state, input, 3) *
           THCTensor_(size)(state, input, 4);
    printf("xdim: %ld, ydim: %ld, zdim: %ld\n", xdim, ydim, zdim);
  }
  long nRow = THCTensor_(size)(state, ptable, 0);
  long nCol = THCTensor_(size)(state, ptable, 1);
  // resize output and make input continuous
  THCTensor_(resizeAs)(state, output, input);
  input = THCTensor_(newContiguous)(state, input);
  // check if BLOCK_SIZE is properly set
  // int check = BLOCK_SIZE;
  // printf("BLOCK_SIZE shoulbe be 32 and it is '%d'\n", check);
  // set dimension of block and grid: one thread per (x, y) element.
  dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
  dim3 grid((xdim + threads.x - 1)/threads.x, (ydim + threads.y - 1)/threads.y);
  // printf("start variation modeling kernel\n");
  // NOTE(review): the commented-out variant below allocated
  // nRow*nCol*sizeof(real) dynamic shared memory; the live launch passes 0 —
  // confirm the kernel no longer needs a shared copy of ptable.
  // cunn_VariationModeling_updateOutput_kernel<real><<<grid, threads, nRow*nCol*sizeof(real)>>>(
  cunn_VariationModeling_updateOutput_kernel<real><<<grid, threads>>>(
      THCTensor_(data)(state, output),
      THCTensor_(data)(state, input),
      xdim,
      ydim,
      zdim,
      THCTensor_(data)(state, ptable),
      nRow,
      nCol,
      accumN);
  // THCTensor_(data)(state, ref));
  // printf("end kernel\n");
  // error checking: surface any launch failure as a Torch error.
  cudaError errcode = cudaGetLastError();
  if (errcode != cudaSuccess)
  {
    THError(cudaGetErrorString(errcode));
  }
  // free input: release the contiguous copy (or the extra reference).
  THCTensor_(free)(state, input);
}
#endif
// #ifndef THC_GENERIC_FILE
// #define THC_GENERIC_FILE "generic/VariationModeling.cu"
// #else
// #include "../common.h"
// void THNN_(VariationModeling_updateOutput)(
// THCState *state,
// THCTensor *output,
// THCTensor *input,
// THCTensor *ptable,
// int accumN)
// // THCTensor *) // ref is for debugging
// {
// // THCUNN_assertSameGPU(state, 4, output, input, ptable, ref);
// THCUNN_assertSameGPU(state, 3, output, input, ptable);
// // get parameters
// int ndims = THCTensor_(nDimension)(state,input);
// long zdim = 1;
// long ydim = 1;
// long xdim = 1;
// if (ndims == 2) {
// zdim = 1;
// ydim = THCTensor_(size)(state, input, 0);
// xdim = THCTensor_(size)(state, input, 1);
// }
// if (ndims == 3) {
// zdim = THCTensor_(size)(state, input, 0);
// ydim = THCTensor_(size)(state, input, 1);
// xdim = THCTensor_(size)(state, input, 2);
// }
// long nRow = THCTensor_(size)(state, ptable, 0);
// long nCol = THCTensor_(size)(state, ptable, 1);
// // for debugging, print ptable
// // real *temp = THCTensor_(data)(state, ptable);
// // for(int i=0; i<nRow; i++) {
// // for(int j=0; j<nCol; j++) {
// // printf("%.1f ", ScalarConvert<real, float>::to(temp[i*nCol+j]));
// // }
// // printf("\n");
// // }
// // resize output and make input continuous
// THCTensor_(resizeAs)(state, output, input);
// input = THCTensor_(newContiguous)(state, input);
// // check if BLOCK_SIZE is properly set
// // int check = BLOCK_SIZE;
// // printf("BLOCK_SIZE shoulbe be 32 and it is '%d'\n", check);
// // set dimension of block and grid
// dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
// dim3 grid((xdim + threads.x - 1)/threads.x, (ydim + threads.y - 1)/threads.y);
// // cunn_VariationModeling_updateOutput_kernel<real><<<grid, threads, nRow*nCol*sizeof(real)>>>(
// cunn_VariationModeling_updateOutput_kernel<real><<<grid, threads>>>(
// THCTensor_(data)(state, output),
// THCTensor_(data)(state, input),
// xdim,
// ydim,
// zdim,
// THCTensor_(data)(state, ptable),
// nRow,
// nCol,
// accumN);
// // THCTensor_(data)(state, ref));
// // error checking
// cudaError errcode = cudaGetLastError();
// if (errcode != cudaSuccess)
// {
// THError(cudaGetErrorString(errcode));
// }
// // free input
// THCTensor_(free)(state, input);
// }
// #endif
|
2eda57150f360268a5a337ca0165e7ecc7c792b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef uint32_t
#define uint32_t unsigned int
#endif
// SHA-256 initial hash values H0..H7 (FIPS 180-4): first 32 bits of the
// fractional parts of the square roots of the first eight primes.
#define H0 0x6a09e667
#define H1 0xbb67ae85
#define H2 0x3c6ef372
#define H3 0xa54ff53a
#define H4 0x510e527f
#define H5 0x9b05688c
#define H6 0x1f83d9ab
#define H7 0x5be0cd19
// Rotate the 32-bit word x right by n bits; shifts of 32 or more leave x
// unchanged (SHA-256 only uses n in [2, 25]).
__device__
uint rotr(uint x, int n) {
    if (n >= 32) return x;
    return (x >> n) | (x << (32 - n));
}
// SHA-256 "choose": per bit, select y where x is set, otherwise z.
// z ^ (x & (y ^ z)) is the one-op-shorter form of (x & y) ^ (~x & z).
__device__
uint ch(uint x, uint y, uint z) {
    return z ^ (x & (y ^ z));
}
// SHA-256 "majority": per bit, the value held by at least two of x, y, z.
// (x & (y | z)) | (y & z) is equivalent to (x & y) ^ (x & z) ^ (y & z).
__device__
uint maj(uint x, uint y, uint z) {
    return (x & (y | z)) | (y & z);
}
// SHA-256 big sigma-0: XOR of right-rotations by 2, 13 and 22.
__device__
uint sigma0(uint x) {
    return rotr(x, 22) ^ rotr(x, 13) ^ rotr(x, 2);
}
// SHA-256 big sigma-1: XOR of right-rotations by 6, 11 and 25.
__device__
uint sigma1(uint x) {
    return rotr(x, 25) ^ rotr(x, 11) ^ rotr(x, 6);
}
// SHA-256 small sigma-0 (message schedule): rotr 7 ^ rotr 18 ^ shr 3.
__device__
uint gamma0(uint x) {
    return (x >> 3) ^ rotr(x, 18) ^ rotr(x, 7);
}
// SHA-256 small sigma-1 (message schedule): rotr 17 ^ rotr 19 ^ shr 10.
__device__
uint gamma1(uint x) {
    return (x >> 10) ^ rotr(x, 19) ^ rotr(x, 17);
}
// SHA-256 round constants (FIPS 180-4): first 32 bits of the fractional
// parts of the cube roots of the first 64 primes.
__constant__ uint K[64]={
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
// Linear global thread index for a (possibly 3-D) grid of 1-D blocks.
// Only blockDim.x is counted, so blocks are assumed one-dimensional here.
__device__
uint get_global_id() {
    uint linear_block = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x
                      + blockIdx.x;
    return linear_block * blockDim.x + threadIdx.x;
}
// Brute-force SHA-256 partial-preimage search.
// Each thread hashes single-block messages laid out (in 32-bit words) as
//     prefix[0..plen) | hex4(id) | hex4(rnd) | hex4(start >> 32) | hex4(start)
// where hex4(v) packs the 4 ASCII hex digits of the low 16 bits of v into
// one word (least-significant digit in the low byte) and rnd sweeps
// 0..0xffff. A candidate matches when the first digest word ANDed with
// `mask` is zero; its (id, rnd) pair is recorded in `match`.
// Precondition: plen + 5 <= 15 so the padded message fits one 512-bit block.
// NOTE(review): only the low 16 bits of each 32-bit half of `start` are
// encoded — bits 16..31 and 48..63 are ignored; confirm the host iterates
// `start` accordingly.
// NOTE(review): matching threads write match[0..2] without atomics, so with
// multiple hits the surviving (id, rnd) is arbitrary.
__global__ void crypt_kernel(ulong start, uint *prefix, ulong plen, uint mask, uint *match){
    int t;
    // W[80] is oversized: SHA-256 only needs a 64-entry message schedule.
    uint W[80], rnd, id, A,B,C,D,E,F,G,H,T1,T2;
    uint Ws[16];  // the rnd-independent part of the 16-word message block
    id = get_global_id();
    //if (id == 0) {
    // printf("%08x\n", start);
    //}
    // brutforce is build up as: prefix | thr_id:04x | <rnd>:04x | start:08x
    for (t = 0; t < plen; ++t) {
        Ws[t] = prefix[t];
        // printf("%04x", prefix[t]);
    }
    // printf("%04x\n", id);
    // hex4 encoding: T1 spreads the 4 nibbles into the 4 bytes of a word;
    // T2 becomes a per-byte flag for nibbles >= 10 (i.e. b3 & (b2 | b1));
    // adding '0' (0x30) plus 0x27 per flagged byte maps 0..15 onto
    // '0'..'9','a'..'f'.
    T1 = (id & 0xf) | (((id >> 4) & 0xf) << 8) | (((id >> 8) & 0xf) << 16) | (((id >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen] = T1 + 0x30303030 + T2 * 0x27;
    // hex4 of the high half of start (only its low 16 bits).
    T1 = (uint)(start >> 32);
    T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen + 2] = T1 + 0x30303030 + T2 * 0x27;
    // hex4 of the low half of start (only its low 16 bits).
    T1 = (uint)start;
    T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen + 3] = T1 + 0x30303030 + T2 * 0x27;
    Ws[plen + 4] = 0x80000000;  // SHA-256 padding: the single 1 bit
    for (t = plen + 5; t < 15; ++t) {
        Ws[t] = 0;
    }
    Ws[15] = 128 + 32 * plen;  // message length in bits: (plen + 4) words
    // preparing buffer done
    /*
    if (id == 0) {
    printf("%016x: ", start);
    for (t = 0; t < 16; ++t) {
    printf("%08x", Ws[t]);
    }
    printf(" - %u\n", Ws[15]);
    }
    */
    for (rnd = 0; rnd < 0x10000; ++rnd) {
        uint digest[8] = {H0, H1, H2, H3, H4, H5, H6, H7};
        #pragma unroll
        for (t = 0; t < 16; ++t) {
            W[t] = Ws[t];
        }
        // Patch in hex4(rnd), the only message word that changes per round.
        T1 = (rnd & 0xf) | (((rnd >> 4) & 0xf) << 8) | (((rnd >> 8) & 0xf) << 16) | (((rnd >> 12) & 0xf) << 24);
        T2 = (T1 & 0xe0e0e0e);
        T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
        W[plen + 1] = T1 + 0x30303030 + T2 * 0x27;
        A = digest[0] = H0;
        B = digest[1] = H1;
        C = digest[2] = H2;
        D = digest[3] = H3;
        E = digest[4] = H4;
        F = digest[5] = H5;
        G = digest[6] = H6;
        H = digest[7] = H7;
        // Message schedule expansion W[16..63].
        for (t = 16; t < 64; t++) {
            W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
        }
        // 64 SHA-256 compression rounds.
        for (t = 0; t < 64; t++) {
            T1 = H + sigma1(E) + ch(E, F, G) + K[t] + W[t];
            T2 = sigma0(A) + maj(A, B, C);
            H = G; G = F; F = E; E = D + T1; D = C; C = B; B = A; A = T1 + T2;
        }
        // Only the first output word is needed for the mask test.
        digest[0] += A;
        if ((digest[0] & mask) == 0) {
            /*
            for (t = 0; t < 16; ++t) {
            printf("%08x", Ws[t]);
            }
            printf(" - %u\n", Ws[15]);
            */
            match[0] = 1;
            match[1] = id;
            match[2] = rnd;
        }
    }
}
#define uint32_t unsigned int
#endif
// SHA-256 initial hash values H0..H7 (FIPS 180-4): first 32 bits of the
// fractional parts of the square roots of the first eight primes.
#define H0 0x6a09e667
#define H1 0xbb67ae85
#define H2 0x3c6ef372
#define H3 0xa54ff53a
#define H4 0x510e527f
#define H5 0x9b05688c
#define H6 0x1f83d9ab
#define H7 0x5be0cd19
// Rotate the 32-bit word x right by n bits; shifts of 32 or more leave x
// unchanged (SHA-256 only uses n in [2, 25]).
__device__
uint rotr(uint x, int n) {
    if (n >= 32) return x;
    return (x >> n) | (x << (32 - n));
}
// SHA-256 "choose": per bit, select y where x is set, otherwise z.
// z ^ (x & (y ^ z)) is the one-op-shorter form of (x & y) ^ (~x & z).
__device__
uint ch(uint x, uint y, uint z) {
    return z ^ (x & (y ^ z));
}
// SHA-256 "majority": per bit, the value held by at least two of x, y, z.
// (x & (y | z)) | (y & z) is equivalent to (x & y) ^ (x & z) ^ (y & z).
__device__
uint maj(uint x, uint y, uint z) {
    return (x & (y | z)) | (y & z);
}
// SHA-256 big sigma-0: XOR of right-rotations by 2, 13 and 22.
__device__
uint sigma0(uint x) {
    return rotr(x, 22) ^ rotr(x, 13) ^ rotr(x, 2);
}
// SHA-256 big sigma-1: XOR of right-rotations by 6, 11 and 25.
__device__
uint sigma1(uint x) {
    return rotr(x, 25) ^ rotr(x, 11) ^ rotr(x, 6);
}
// SHA-256 small sigma-0 (message schedule): rotr 7 ^ rotr 18 ^ shr 3.
__device__
uint gamma0(uint x) {
    return (x >> 3) ^ rotr(x, 18) ^ rotr(x, 7);
}
// SHA-256 small sigma-1 (message schedule): rotr 17 ^ rotr 19 ^ shr 10.
__device__
uint gamma1(uint x) {
    return (x >> 10) ^ rotr(x, 19) ^ rotr(x, 17);
}
// SHA-256 round constants (FIPS 180-4): first 32 bits of the fractional
// parts of the cube roots of the first 64 primes.
__constant__ uint K[64]={
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
// Linear global thread index for a (possibly 3-D) grid of 1-D blocks.
// Only blockDim.x is counted, so blocks are assumed one-dimensional here.
__device__
uint get_global_id() {
    uint linear_block = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x
                      + blockIdx.x;
    return linear_block * blockDim.x + threadIdx.x;
}
// Brute-force SHA-256 partial-preimage search.
// Each thread hashes single-block messages laid out (in 32-bit words) as
//     prefix[0..plen) | hex4(id) | hex4(rnd) | hex4(start >> 32) | hex4(start)
// where hex4(v) packs the 4 ASCII hex digits of the low 16 bits of v into
// one word (least-significant digit in the low byte) and rnd sweeps
// 0..0xffff. A candidate matches when the first digest word ANDed with
// `mask` is zero; its (id, rnd) pair is recorded in `match`.
// Precondition: plen + 5 <= 15 so the padded message fits one 512-bit block.
// NOTE(review): only the low 16 bits of each 32-bit half of `start` are
// encoded — bits 16..31 and 48..63 are ignored; confirm the host iterates
// `start` accordingly.
// NOTE(review): matching threads write match[0..2] without atomics, so with
// multiple hits the surviving (id, rnd) is arbitrary.
__global__ void crypt_kernel(ulong start, uint *prefix, ulong plen, uint mask, uint *match){
    int t;
    // W[80] is oversized: SHA-256 only needs a 64-entry message schedule.
    uint W[80], rnd, id, A,B,C,D,E,F,G,H,T1,T2;
    uint Ws[16];  // the rnd-independent part of the 16-word message block
    id = get_global_id();
    //if (id == 0) {
    // printf("%08x\n", start);
    //}
    // brutforce is build up as: prefix | thr_id:04x | <rnd>:04x | start:08x
    for (t = 0; t < plen; ++t) {
        Ws[t] = prefix[t];
        // printf("%04x", prefix[t]);
    }
    // printf("%04x\n", id);
    // hex4 encoding: T1 spreads the 4 nibbles into the 4 bytes of a word;
    // T2 becomes a per-byte flag for nibbles >= 10 (i.e. b3 & (b2 | b1));
    // adding '0' (0x30) plus 0x27 per flagged byte maps 0..15 onto
    // '0'..'9','a'..'f'.
    T1 = (id & 0xf) | (((id >> 4) & 0xf) << 8) | (((id >> 8) & 0xf) << 16) | (((id >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen] = T1 + 0x30303030 + T2 * 0x27;
    // hex4 of the high half of start (only its low 16 bits).
    T1 = (uint)(start >> 32);
    T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen + 2] = T1 + 0x30303030 + T2 * 0x27;
    // hex4 of the low half of start (only its low 16 bits).
    T1 = (uint)start;
    T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen + 3] = T1 + 0x30303030 + T2 * 0x27;
    Ws[plen + 4] = 0x80000000;  // SHA-256 padding: the single 1 bit
    for (t = plen + 5; t < 15; ++t) {
        Ws[t] = 0;
    }
    Ws[15] = 128 + 32 * plen;  // message length in bits: (plen + 4) words
    // preparing buffer done
    /*
    if (id == 0) {
    printf("%016x: ", start);
    for (t = 0; t < 16; ++t) {
    printf("%08x", Ws[t]);
    }
    printf(" - %u\n", Ws[15]);
    }
    */
    for (rnd = 0; rnd < 0x10000; ++rnd) {
        uint digest[8] = {H0, H1, H2, H3, H4, H5, H6, H7};
        #pragma unroll
        for (t = 0; t < 16; ++t) {
            W[t] = Ws[t];
        }
        // Patch in hex4(rnd), the only message word that changes per round.
        T1 = (rnd & 0xf) | (((rnd >> 4) & 0xf) << 8) | (((rnd >> 8) & 0xf) << 16) | (((rnd >> 12) & 0xf) << 24);
        T2 = (T1 & 0xe0e0e0e);
        T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
        W[plen + 1] = T1 + 0x30303030 + T2 * 0x27;
        A = digest[0] = H0;
        B = digest[1] = H1;
        C = digest[2] = H2;
        D = digest[3] = H3;
        E = digest[4] = H4;
        F = digest[5] = H5;
        G = digest[6] = H6;
        H = digest[7] = H7;
        // Message schedule expansion W[16..63].
        for (t = 16; t < 64; t++) {
            W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
        }
        // 64 SHA-256 compression rounds.
        for (t = 0; t < 64; t++) {
            T1 = H + sigma1(E) + ch(E, F, G) + K[t] + W[t];
            T2 = sigma0(A) + maj(A, B, C);
            H = G; G = F; F = E; E = D + T1; D = C; C = B; B = A; A = T1 + T2;
        }
        // Only the first output word is needed for the mask test.
        digest[0] += A;
        if ((digest[0] & mask) == 0) {
            /*
            for (t = 0; t < 16; ++t) {
            printf("%08x", Ws[t]);
            }
            printf(" - %u\n", Ws[15]);
            */
            match[0] = 1;
            match[1] = id;
            match[2] = rnd;
        }
    }
}
c70f9efebd6a13c44f39b4a96e0850ab1d842022.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=64 --blockDim=128 --warp-sync=32
#include "common.h"
// Multi-pass reduction kernel: forward declaration, explicit instantiation
// for the configuration used by the harness (blockSize=128, n a power of 2),
// and the definition, which delegates to reduceBlocks from common.h.
// NOTE(review): reduceBlocks is defined in common.h; presumably each block
// writes one partial result to g_odata — confirm there.
template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n);
template __global__ void reduceMultiPass<128, true>(const float *g_idata, float *g_odata, unsigned int n);
template <unsigned int blockSize, bool nIsPow2>
__global__ void
reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n)
{
    reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
}
| c70f9efebd6a13c44f39b4a96e0850ab1d842022.cu | //pass
//--gridDim=64 --blockDim=128 --warp-sync=32
#include "common.h"
// Multi-pass reduction kernel: forward declaration, explicit instantiation
// for the configuration used by the harness (blockSize=128, n a power of 2),
// and the definition, which delegates to reduceBlocks from common.h.
// NOTE(review): reduceBlocks is defined in common.h; presumably each block
// writes one partial result to g_odata — confirm there.
template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n);
template __global__ void reduceMultiPass<128, true>(const float *g_idata, float *g_odata, unsigned int n);
template <unsigned int blockSize, bool nIsPow2>
__global__ void
reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n)
{
    reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
}
|
6c827549af34532872681a3db035520d4a339a82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/select_smooth_l1_loss_op.h"
namespace caffe2 {
namespace {
// Element-wise select-smooth-L1 loss over the M selected foreground boxes.
// L holds (n, c, y, x) indices into the N x D x H x W prediction map Y_hat;
// for box i the 4 targets Y[i*4 + j] are compared against channels c..c+3.
// Results are written sparsely into `out` (same layout as Y_hat), divided
// by the normalizer max(S[0], 1).
__global__ void SelectSmoothL1Kernel(
    const int D, const int H, const int W,
    const int M, const float* Y_hat, const float* Y, const float* L, float* out,
    const float* S, const float beta) {
  // f(x) = 0.5 * x^2 / beta if |x| < beta
  //        |x| - 0.5 * beta otherwise
  CUDA_1D_KERNEL_LOOP(i, M) {
    int n = L[i * 4];
    int c = L[i * 4 + 1];
    int y = L[i * 4 + 2];
    int x = L[i * 4 + 3];
    for (int j = 0; j < 4; j++){
      // Y_hat: N x (A * CLS * 4) x H x W
      int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
      float y_hat = Y_hat[ind];
      // NOTE(review): this `float y` shadows the `int y` coordinate above;
      // `ind` is already computed so behavior is correct, but it is fragile.
      float y = Y[i * 4 + j];
      float val = y_hat - y;
      float abs_val = c10::hip::compat::abs(val);
      if (abs_val < beta) {
        out[ind] = (0.5 * val * val / beta) / c10::hip::compat::max(S[0], static_cast<float>(1.0));
      } else {
        out[ind] = (abs_val - 0.5 * beta) / c10::hip::compat::max(S[0], static_cast<float>(1.0));
      }
    }
  }
}
// Backward counterpart of SelectSmoothL1Kernel.
// Writes norm * d_loss * f'(y_hat - y) / max(S[0], 1) sparsely into `out`
// at channels c..c+3 of each selected box (n, c, y, x) from L.
__global__ void SelectSmoothL1GradientKernel(
    const int D, const int H, const int W,
    const int M,
    const float* Y_hat,
    const float* Y,
    const float* L,
    float* out,
    const float* d_loss_data,
    float norm,
    const float* S,
    float beta) {
  // f'(x) = x / beta if |x| < beta
  //       = sign(x)  otherwise
  // We also scale by norm * d_loss in this kernel for convenience
  CUDA_1D_KERNEL_LOOP(i, M) {
    int n = L[i * 4];
    int c = L[i * 4 + 1];
    int y = L[i * 4 + 2];
    int x = L[i * 4 + 3];
    float d_loss = *d_loss_data;  // scalar upstream gradient
    for (int j = 0; j < 4; j++) {
      int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
      float y_hat = Y_hat[ind];
      // NOTE(review): this `float y` shadows the `int y` coordinate above;
      // `ind` is already computed so behavior is correct, but it is fragile.
      float y = Y[i * 4 + j];
      float val = y_hat - y;
      float abs_val = c10::hip::compat::abs(val);
      if (abs_val < beta) {
        out[ind] = norm * d_loss * val / beta / c10::hip::compat::max(S[0], static_cast<float>(1.0));
      } else {
        // sign(val) computed branchlessly as (0 < val) - (val < 0)
        out[ind] = norm * d_loss * ((float(0) < val) - (val < float(0))) / c10::hip::compat::max(S[0], static_cast<float>(1.0));
      }
    }
  }
}
} // namespace
// Forward pass: computes the scalar select-smooth-L1 loss over the M
// selected foreground boxes and writes it to output 0 (avg_loss).
template<>
bool SelectSmoothL1LossOp<float, CUDAContext>::RunOnDevice() {
  // bbox targets predictions, for example: N x (A * 4) H x W in cls-agnostic case
  auto& Y_hat = Input(0);
  // true targets: for example: M x 4 where M is the #fg boxes per fpn level
  auto& Y = Input(1);
  // locations of fg boxes: M x 4
  auto& L = Input(2);
  // total number of fg boxes across all FPN levels: scalar
  auto& S = Input(3);
  auto* avg_loss = Output(0, vector<int64_t>(), at::dtype<float>());
  // No selected boxes: the loss is defined to be 0.
  if (Y.size() == 0){
    math::Set<float, CUDAContext>(
        1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
    return true;
  }
  // NOTE(review): N is unused below (kept as in the original).
  int N = Y_hat.dim32(0);
  int D = Y_hat.dim32(1);
  int H = Y_hat.dim32(2);
  int W = Y_hat.dim32(3);
  int M = Y.dim32(0);
  // initialization: buff_ holds the element-wise losses; entries not touched
  // by the sparse kernel stay 0 and do not affect the sum.
  buff_.ResizeLike(Y_hat);
  math::Set<float, CUDAContext>(
      1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
  math::Set<float, CUDAContext>(
      buff_.size(), 0.0, buff_.mutable_data<float>(), &context_);
  // Element-wise smooth l1 loss
  // l := SelectSmoothL1((y_hat - y))
  // NOTE(review): the grid is sized for buff_.size() although the kernel
  // iterates only M entries — surplus threads exit immediately.
  hipLaunchKernelGGL(( SelectSmoothL1Kernel), dim3(CAFFE_GET_BLOCKS(buff_.size())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0, context_.cuda_stream(),
      D, H, W,
      M, Y_hat.data<float>(), Y.data<float>(),
      L.data<float>(), buff_.mutable_data<float>(),
      S.data<float>(), beta_);
  // Sum of all losses
  // al := sum_i l_i
  float* avg_loss_data = avg_loss->mutable_data<float>();
  math::Sum<float, CUDAContext>(
      buff_.size(), buff_.data<float>(), avg_loss_data, &context_);
  // Average of input batch size: apply the op's scale_ factor.
  math::Scale<float, float, CUDAContext>(
      1, scale_, avg_loss_data, avg_loss_data, &context_);
  return true;
}
// Gradient pass: d_Y_hat is zero everywhere except the selected box
// locations, where the kernel writes the scaled smooth-L1 derivative.
template<>
bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
  auto& Y_hat = Input(0);
  auto& Y = Input(1);
  auto& L = Input(2);
  auto& S = Input(3);
  // Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
  auto& d_avg_loss = Input(4);
  auto* d_Y_hat = Output(0, Y_hat.sizes(), at::dtype<float>()); // gradient of net w.r.t. Y_hat ("gradInput")
  math::Set<float, CUDAContext>(
      d_Y_hat->size(), 0.0, d_Y_hat->mutable_data<float>(), &context_);
  // No selected boxes: gradient stays all-zero.
  if (Y.size() == 0){
    return true;
  }
  // NOTE(review): N is unused below (kept as in the original).
  int N = Y_hat.dim32(0);
  int D = Y_hat.dim32(1);
  int H = Y_hat.dim32(2);
  int W = Y_hat.dim32(3);
  int M = Y.dim32(0);
  // Element-wise weighted difference (can be used to ignore or reweight
  // specific components)
  // d := (y_hat - y)
  // d_Y_hat := d_avg_loss * SelectSmoothL1'((y_hat - y))
  hipLaunchKernelGGL(( SelectSmoothL1GradientKernel), dim3(CAFFE_GET_BLOCKS(d_Y_hat->size())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0, context_.cuda_stream(),
      D, H, W, M, Y_hat.data<float>(), Y.data<float>(),
      L.data<float>(), d_Y_hat->mutable_data<float>(),
      d_avg_loss.data<float>(), scale_, S.data<float>(), beta_);
  return true;
}
// Register the CUDA implementations of the forward and gradient ops.
REGISTER_CUDA_OPERATOR(SelectSmoothL1Loss,
                       SelectSmoothL1LossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SelectSmoothL1LossGradient,
                       SelectSmoothL1LossGradientOp<float, CUDAContext>);
} // namespace caffe2
| 6c827549af34532872681a3db035520d4a339a82.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/select_smooth_l1_loss_op.h"
namespace caffe2 {
namespace {
// Element-wise select-smooth-L1 loss over the M selected foreground boxes.
// L holds (n, c, y, x) indices into the N x D x H x W prediction map Y_hat;
// for box i the 4 targets Y[i*4 + j] are compared against channels c..c+3.
// Results are written sparsely into `out` (same layout as Y_hat), divided
// by the normalizer max(S[0], 1).
__global__ void SelectSmoothL1Kernel(
    const int D, const int H, const int W,
    const int M, const float* Y_hat, const float* Y, const float* L, float* out,
    const float* S, const float beta) {
  // f(x) = 0.5 * x^2 / beta if |x| < beta
  //        |x| - 0.5 * beta otherwise
  CUDA_1D_KERNEL_LOOP(i, M) {
    int n = L[i * 4];
    int c = L[i * 4 + 1];
    int y = L[i * 4 + 2];
    int x = L[i * 4 + 3];
    for (int j = 0; j < 4; j++){
      // Y_hat: N x (A * CLS * 4) x H x W
      int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
      float y_hat = Y_hat[ind];
      // NOTE(review): this `float y` shadows the `int y` coordinate above;
      // `ind` is already computed so behavior is correct, but it is fragile.
      float y = Y[i * 4 + j];
      float val = y_hat - y;
      float abs_val = c10::cuda::compat::abs(val);
      if (abs_val < beta) {
        out[ind] = (0.5 * val * val / beta) / c10::cuda::compat::max(S[0], static_cast<float>(1.0));
      } else {
        out[ind] = (abs_val - 0.5 * beta) / c10::cuda::compat::max(S[0], static_cast<float>(1.0));
      }
    }
  }
}
// Backward counterpart of SelectSmoothL1Kernel.
// Writes norm * d_loss * f'(y_hat - y) / max(S[0], 1) sparsely into `out`
// at channels c..c+3 of each selected box (n, c, y, x) from L.
__global__ void SelectSmoothL1GradientKernel(
    const int D, const int H, const int W,
    const int M,
    const float* Y_hat,
    const float* Y,
    const float* L,
    float* out,
    const float* d_loss_data,
    float norm,
    const float* S,
    float beta) {
  // f'(x) = x / beta if |x| < beta
  //       = sign(x)  otherwise
  // We also scale by norm * d_loss in this kernel for convenience
  CUDA_1D_KERNEL_LOOP(i, M) {
    int n = L[i * 4];
    int c = L[i * 4 + 1];
    int y = L[i * 4 + 2];
    int x = L[i * 4 + 3];
    float d_loss = *d_loss_data;  // scalar upstream gradient
    for (int j = 0; j < 4; j++) {
      int ind = n * (D * H * W) + (c + j) * (H * W) + y * W + x;
      float y_hat = Y_hat[ind];
      // NOTE(review): this `float y` shadows the `int y` coordinate above;
      // `ind` is already computed so behavior is correct, but it is fragile.
      float y = Y[i * 4 + j];
      float val = y_hat - y;
      float abs_val = c10::cuda::compat::abs(val);
      if (abs_val < beta) {
        out[ind] = norm * d_loss * val / beta / c10::cuda::compat::max(S[0], static_cast<float>(1.0));
      } else {
        // sign(val) computed branchlessly as (0 < val) - (val < 0)
        out[ind] = norm * d_loss * ((float(0) < val) - (val < float(0))) / c10::cuda::compat::max(S[0], static_cast<float>(1.0));
      }
    }
  }
}
} // namespace
// Forward pass: computes the scalar select-smooth-L1 loss over the M
// selected foreground boxes and writes it to output 0 (avg_loss).
template<>
bool SelectSmoothL1LossOp<float, CUDAContext>::RunOnDevice() {
  // bbox targets predictions, for example: N x (A * 4) H x W in cls-agnostic case
  auto& Y_hat = Input(0);
  // true targets: for example: M x 4 where M is the #fg boxes per fpn level
  auto& Y = Input(1);
  // locations of fg boxes: M x 4
  auto& L = Input(2);
  // total number of fg boxes across all FPN levels: scalar
  auto& S = Input(3);
  auto* avg_loss = Output(0, vector<int64_t>(), at::dtype<float>());
  // No selected boxes: the loss is defined to be 0.
  if (Y.size() == 0){
    math::Set<float, CUDAContext>(
        1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
    return true;
  }
  // NOTE(review): N is unused below (kept as in the original).
  int N = Y_hat.dim32(0);
  int D = Y_hat.dim32(1);
  int H = Y_hat.dim32(2);
  int W = Y_hat.dim32(3);
  int M = Y.dim32(0);
  // initialization: buff_ holds the element-wise losses; entries not touched
  // by the sparse kernel stay 0 and do not affect the sum.
  buff_.ResizeLike(Y_hat);
  math::Set<float, CUDAContext>(
      1, static_cast<float>(0), avg_loss->mutable_data<float>(), &context_);
  math::Set<float, CUDAContext>(
      buff_.size(), 0.0, buff_.mutable_data<float>(), &context_);
  // Element-wise smooth l1 loss
  // l := SelectSmoothL1((y_hat - y))
  // NOTE(review): the grid is sized for buff_.size() although the kernel
  // iterates only M entries — surplus threads exit immediately.
  SelectSmoothL1Kernel<<<CAFFE_GET_BLOCKS(buff_.size()),
      CAFFE_CUDA_NUM_THREADS,
      0, context_.cuda_stream()>>>(
      D, H, W,
      M, Y_hat.data<float>(), Y.data<float>(),
      L.data<float>(), buff_.mutable_data<float>(),
      S.data<float>(), beta_);
  // Sum of all losses
  // al := sum_i l_i
  float* avg_loss_data = avg_loss->mutable_data<float>();
  math::Sum<float, CUDAContext>(
      buff_.size(), buff_.data<float>(), avg_loss_data, &context_);
  // Average of input batch size: apply the op's scale_ factor.
  math::Scale<float, float, CUDAContext>(
      1, scale_, avg_loss_data, avg_loss_data, &context_);
  return true;
}
// Backward op: d_Y_hat = d_avg_loss * scale_ * SelectSmoothL1'(Y_hat - Y),
// written only at the selected fg-box channels; everything else stays 0.
// Returns true on success (Caffe2 convention).
template<>
bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& L = Input(2);
auto& S = Input(3);
// Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
auto& d_avg_loss = Input(4);
auto* d_Y_hat = Output(0, Y_hat.sizes(), at::dtype<float>()); // gradient of net w.r.t. Y_hat ("gradInput")
// Zero the whole gradient first; the kernel only fills selected entries.
math::Set<float, CUDAContext>(
d_Y_hat->size(), 0.0, d_Y_hat->mutable_data<float>(), &context_);
// No fg boxes: gradient stays all-zero.
if (Y.size() == 0){
return true;
}
// N is unused below; kept for readability of the shape.
int N = Y_hat.dim32(0);
int D = Y_hat.dim32(1);
int H = Y_hat.dim32(2);
int W = Y_hat.dim32(3);
int M = Y.dim32(0);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := (y_hat - y)
// d_Y_hat := d_avg_loss * SelectSmoothL1'((y_hat - y))
SelectSmoothL1GradientKernel<<<CAFFE_GET_BLOCKS(d_Y_hat->size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
D, H, W, M, Y_hat.data<float>(), Y.data<float>(),
L.data<float>(), d_Y_hat->mutable_data<float>(),
d_avg_loss.data<float>(), scale_, S.data<float>(), beta_);
return true;
}
// Register forward/backward ops with Caffe2's CUDA operator registry.
REGISTER_CUDA_OPERATOR(SelectSmoothL1Loss,
SelectSmoothL1LossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SelectSmoothL1LossGradient,
SelectSmoothL1LossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
9681ad563fdbeabcccdf36c127d836e6e91d3659.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define DATATYPE int
#define SMEMSIZE 512
#define REP 128
texture <int,1,hipReadModeElementType> texref1;
texture <int,1,hipReadModeElementType> texref2;
// Latency microbenchmark: chase pointers through the two 1-D integer
// textures texref1/texref2 and time REP dependent fetches with clock().
// Each fetch's address depends on the previous result, so fetches cannot
// overlap; 'time' receives average clocks per unrolled fetch pair.
// 'out' is written only so the chain cannot be optimized away.
__global__ void texture_order_1(double *time,DATATYPE *out,int its)
{
DATATYPE p,q=threadIdx.x;
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
// NOTE(review): 'i < its' compares unsigned vs. signed; fine for the
// its=30 used by main_test, but a negative 'its' would wrap.
for (i=0;i<its;i++)
{
// Re-align the block before each timed section.
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=tex1Dfetch(texref1,q);
q=tex1Dfetch(texref2,p);
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
// Average clocks per unrolled iteration over all repetitions.
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
// Host driver: upload the two index arrays, bind them to the texture
// references, launch the latency kernel, and print average/min/max clocks.
// 'xxx' tags the access pattern (1 ordered, 2 shuffled-in-32, 3 shuffled
// across n/32 chunks) in the output row.
// NOTE(review): none of the hip* return codes are checked here.
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx)
{
int its=30;
DATATYPE *d_in1,*d_in2;
hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
// Bind the index tables to the file-scope texture references read by the kernel.
hipBindTexture(NULL,texref1,d_in1,sizeof(DATATYPE)*SMEMSIZE);
hipBindTexture(NULL,texref2,d_in2,sizeof(DATATYPE)*SMEMSIZE);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
hipMalloc((void**)&d_time,sizeof(double)*blocks*threads);
hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
hipLaunchKernelGGL(( texture_order_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_out,its);
// hipMemcpy is synchronizing, so no explicit device sync is needed here.
hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
// Sample one timing value per group of 32 threads (j += 32).
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt);
hipUnbindTexture(texref1);
hipUnbindTexture(texref2);
hipFree(d_time);
hipFree(d_out);
hipFree(d_in1);
hipFree(d_in2);
free(h_time);
return 0;
}
/* Fill a[0..n-1] with the identity permutation: a[i] == i. */
void init_order(DATATYPE *a, int n)
{
    DATATYPE *end = a + n;
    for (DATATYPE *cur = a; cur != end; ++cur) {
        *cur = (DATATYPE)(cur - a);
    }
}
/*
 * Fill a[] so that every aligned chunk of 32 elements holds a random
 * permutation of {base, ..., base+31}. The permutation produced for one
 * chunk is carried over as the draw pool for the next chunk.
 * Requires n to be a multiple of 32.
 */
void init_disordered_32(DATATYPE *a, int n)
{
    DATATYPE p[32];
    for (int i = 0; i < 32; i++)
    {
        p[i] = i;
    }
    for (int i = 0; i < n; i += 32)
    {
        for (int j = 0; j < 32; j++)
        {
            /* Draw without replacement from the active prefix p[0..32-j-1]. */
            int jj = rand() % (32 - j);
            a[i + j] = p[jj];
            /* Close the gap at p[jj]; the last valid source index is 32-j-1.
             * BUG FIX: the original bound (k < 32-j) read p[32-j], which is
             * one past the active region and past the array itself when
             * j == 0 (p[32]) — an out-of-bounds stack read. The shifted
             * tail element was never used, so observable output is the same. */
            for (int k = jj; k < (32 - j) - 1; k++)
            {
                p[k] = p[k + 1];
            }
        }
        /* Record this chunk's permutation as the next pool, then bias by i. */
        for (int j = 0; j < 32; j++)
        {
            p[j] = a[i + j];
            a[i + j] += i;
        }
    }
}
/*
 * Fill a[] with a two-level shuffled pattern (nn = n/32): b is built
 * chunk-by-chunk as a running shuffle of 0..nn-1, then transposed and
 * scaled into a so that a[j*32+i] = b[i*nn+j]*32 + i. The result is a
 * permutation of 0..n-1. Requires n to be a multiple of 32.
 */
void init_disordered_512(DATATYPE *a, int n)
{
    const int nn = n / 32;
    DATATYPE *q = (DATATYPE *)malloc(sizeof(DATATYPE) * nn);
    DATATYPE *b = (DATATYPE *)malloc(sizeof(DATATYPE) * n);
    /* Start the draw pool as the identity sequence 0..nn-1 (inlined so the
     * function is self-contained). */
    for (int i = 0; i < nn; i++)
    {
        q[i] = i;
    }
    for (int i = 0; i < n; i += nn)
    {
        for (int j = 0; j < nn; j++)
        {
            /* Draw without replacement from the active prefix q[0..nn-j-1]. */
            int jj = rand() % (nn - j);
            b[i + j] = q[jj];
            /* Close the gap at q[jj]; the last valid source index is nn-j-1.
             * BUG FIX: the original bound (k < nn-j) read q[nn-j], one past
             * the heap allocation when j == 0. The shifted tail element was
             * never used, so observable output is unchanged. */
            for (int k = jj; k < (nn - j) - 1; k++)
            {
                q[k] = q[k + 1];
            }
        }
        /* Carry this chunk's permutation as the next draw pool. */
        for (int j = 0; j < nn; j++)
        {
            q[j] = b[i + j];
        }
    }
    DATATYPE p[32];
    for (int i = 0; i < 32; i++)
    {
        p[i] = i;
    }
    /* Transpose b (32 rows of nn) and scale; lane i is offset by p[i] == i. */
    for (int i = 0; i < 32; i++)
    {
        for (int j = 0; j < nn; j++)
        {
            a[j * 32 + i] = b[i * nn + j] * 32 + p[i];
        }
    }
    free(q);
    free(b);
}
// Entry point: build three index patterns — ordered, shuffled inside each
// 32-element chunk, and shuffled across n/32-element chunks — then sweep
// the block count and time texture-fetch latency for each pattern.
int main()
{
DATATYPE *h_in1, *h_in2, *h_in3;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
init_disordered_32(h_in2, SMEMSIZE);
init_disordered_512(h_in3, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
for (int i = 0; i <= 1024; i += 32) {
// i == 0 would launch an empty grid, so substitute a single block.
int blocks = (i == 0 ? 1 : i);
int threads = 256;
main_test(blocks, threads, h_in1, h_in1, 1);
main_test(blocks, threads, h_in2, h_in2, 2);
main_test(blocks, threads, h_in3, h_in3, 3);
}
free(h_in1);
free(h_in2);
free(h_in3);
return 0;
} | 9681ad563fdbeabcccdf36c127d836e6e91d3659.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define DATATYPE int
#define SMEMSIZE 512
#define REP 128
texture <int,1,cudaReadModeElementType> texref1;
texture <int,1,cudaReadModeElementType> texref2;
// Latency microbenchmark: chase pointers through the two 1-D integer
// textures texref1/texref2 and time REP dependent fetches with clock().
// Each fetch's address depends on the previous result, so fetches cannot
// overlap; 'time' receives average clocks per unrolled fetch pair.
// 'out' is written only so the chain cannot be optimized away.
__global__ void texture_order_1(double *time,DATATYPE *out,int its)
{
DATATYPE p,q=threadIdx.x;
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
// NOTE(review): 'i < its' compares unsigned vs. signed; fine for the
// its=30 used by main_test, but a negative 'its' would wrap.
for (i=0;i<its;i++)
{
// Re-align the block before each timed section.
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=tex1Dfetch(texref1,q);
q=tex1Dfetch(texref2,p);
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
// Average clocks per unrolled iteration over all repetitions.
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
// Host driver: upload the two index arrays, bind them to the texture
// references, launch the latency kernel, and print average/min/max clocks.
// 'xxx' tags the access pattern (1 ordered, 2 shuffled-in-32, 3 shuffled
// across n/32 chunks) in the output row.
// NOTE(review): none of the cuda* return codes are checked here.
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx)
{
int its=30;
DATATYPE *d_in1,*d_in2;
cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
// Bind the index tables to the file-scope texture references read by the kernel.
cudaBindTexture(NULL,texref1,d_in1,sizeof(DATATYPE)*SMEMSIZE);
cudaBindTexture(NULL,texref2,d_in2,sizeof(DATATYPE)*SMEMSIZE);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
texture_order_1<<<blocks,threads>>>(d_time,d_out,its);
// cudaMemcpy is synchronizing, so no explicit device sync is needed here.
cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
// Sample one timing value per group of 32 threads (j += 32).
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt);
cudaUnbindTexture(texref1);
cudaUnbindTexture(texref2);
cudaFree(d_time);
cudaFree(d_out);
cudaFree(d_in1);
cudaFree(d_in2);
free(h_time);
return 0;
}
/* Write the identity sequence 0..n-1 into a. */
void init_order(DATATYPE *a, int n)
{
    int idx = 0;
    while (idx < n) {
        a[idx] = idx;
        ++idx;
    }
}
/*
 * Fill a[] so that every aligned chunk of 32 elements holds a random
 * permutation of {base, ..., base+31}. The permutation produced for one
 * chunk is carried over as the draw pool for the next chunk.
 * Requires n to be a multiple of 32.
 */
void init_disordered_32(DATATYPE *a, int n)
{
    DATATYPE p[32];
    for (int i = 0; i < 32; i++)
    {
        p[i] = i;
    }
    for (int i = 0; i < n; i += 32)
    {
        for (int j = 0; j < 32; j++)
        {
            /* Draw without replacement from the active prefix p[0..32-j-1]. */
            int jj = rand() % (32 - j);
            a[i + j] = p[jj];
            /* Close the gap at p[jj]; the last valid source index is 32-j-1.
             * BUG FIX: the original bound (k < 32-j) read p[32-j], which is
             * one past the active region and past the array itself when
             * j == 0 (p[32]) — an out-of-bounds stack read. The shifted
             * tail element was never used, so observable output is the same. */
            for (int k = jj; k < (32 - j) - 1; k++)
            {
                p[k] = p[k + 1];
            }
        }
        /* Record this chunk's permutation as the next pool, then bias by i. */
        for (int j = 0; j < 32; j++)
        {
            p[j] = a[i + j];
            a[i + j] += i;
        }
    }
}
/*
 * Fill a[] with a two-level shuffled pattern (nn = n/32): b is built
 * chunk-by-chunk as a running shuffle of 0..nn-1, then transposed and
 * scaled into a so that a[j*32+i] = b[i*nn+j]*32 + i. The result is a
 * permutation of 0..n-1. Requires n to be a multiple of 32.
 */
void init_disordered_512(DATATYPE *a, int n)
{
    const int nn = n / 32;
    DATATYPE *q = (DATATYPE *)malloc(sizeof(DATATYPE) * nn);
    DATATYPE *b = (DATATYPE *)malloc(sizeof(DATATYPE) * n);
    /* Start the draw pool as the identity sequence 0..nn-1 (inlined so the
     * function is self-contained). */
    for (int i = 0; i < nn; i++)
    {
        q[i] = i;
    }
    for (int i = 0; i < n; i += nn)
    {
        for (int j = 0; j < nn; j++)
        {
            /* Draw without replacement from the active prefix q[0..nn-j-1]. */
            int jj = rand() % (nn - j);
            b[i + j] = q[jj];
            /* Close the gap at q[jj]; the last valid source index is nn-j-1.
             * BUG FIX: the original bound (k < nn-j) read q[nn-j], one past
             * the heap allocation when j == 0. The shifted tail element was
             * never used, so observable output is unchanged. */
            for (int k = jj; k < (nn - j) - 1; k++)
            {
                q[k] = q[k + 1];
            }
        }
        /* Carry this chunk's permutation as the next draw pool. */
        for (int j = 0; j < nn; j++)
        {
            q[j] = b[i + j];
        }
    }
    DATATYPE p[32];
    for (int i = 0; i < 32; i++)
    {
        p[i] = i;
    }
    /* Transpose b (32 rows of nn) and scale; lane i is offset by p[i] == i. */
    for (int i = 0; i < 32; i++)
    {
        for (int j = 0; j < nn; j++)
        {
            a[j * 32 + i] = b[i * nn + j] * 32 + p[i];
        }
    }
    free(q);
    free(b);
}
// Entry point: build three index patterns — ordered, shuffled inside each
// 32-element chunk, and shuffled across n/32-element chunks — then sweep
// the block count and time texture-fetch latency for each pattern.
int main()
{
DATATYPE *h_in1, *h_in2, *h_in3;
h_in1 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in2 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
h_in3 = (DATATYPE *) malloc(sizeof(DATATYPE) * SMEMSIZE);
init_order(h_in1, SMEMSIZE);
init_disordered_32(h_in2, SMEMSIZE);
init_disordered_512(h_in3, SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
for (int i = 0; i <= 1024; i += 32) {
// i == 0 would launch an empty grid, so substitute a single block.
int blocks = (i == 0 ? 1 : i);
int threads = 256;
main_test(blocks, threads, h_in1, h_in1, 1);
main_test(blocks, threads, h_in2, h_in2, 2);
main_test(blocks, threads, h_in3, h_in3, 3);
}
free(h_in1);
free(h_in2);
free(h_in3);
return 0;
}
da41c8702ee12edf8a7efd37d94075302e31c777.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 2 of 2: implement the fast kernel using shared memory
// Reverse d_in into d_out one block-sized tile at a time: each block loads
// its tile into dynamic shared memory in reversed order, then writes it
// forward into the mirrored block position. Requires blockDim.x * sizeof(int)
// bytes of dynamic shared memory (3rd launch parameter) and a grid that
// exactly covers the array — there is no bounds guard.
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
int inOffset = blockDim.x * blockIdx.x;
int in = inOffset + threadIdx.x;
// Load one element per thread from device memory and store it
// *in reversed order* into temporary shared memory
s_data[blockDim.x - 1 - threadIdx.x] = d_in[in];
// Block until all threads in the block have written their data to shared mem
__syncthreads();
// write the data from shared memory in forward order,
// but to the reversed block offset as before
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int out = outOffset + threadIdx.x;
d_out[out] = s_data[threadIdx.x];
}
////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////
// Allocate a dimA-element int array, reverse it on the GPU with the
// shared-memory kernel above, and verify the result element-by-element
// on the host.
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
//int dimA = 256 * 1024; // 256K elements (1MB total)
int dimA = 256 * 16; // 4K elements (16KB total); must divide evenly by the block size
// pointer for device memory
int *d_b, *d_a;
// define grid and block size
int numThreadsPerBlock = 256;
// Compute number of blocks needed based on array size and desired block size
int numBlocks = dimA / numThreadsPerBlock;
// Dynamic shared memory: one int per thread, passed as the 3rd launch
// parameter below.
int sharedMemSize = numThreadsPerBlock * sizeof(int);
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( (void **) &d_a, memSize );
hipMalloc( (void **) &d_b, memSize );
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
}
// Copy host array to device array
hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice );
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock), sharedMemSize , 0, d_b, d_a );
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// device to host copy
hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
// verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
printf("%d ",h_a[i]);
assert(h_a[i] == dimA - 1 - i );
}
// free device memory
hipFree(d_a);
hipFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
// Abort with a diagnostic if any prior HIP API call or kernel launch left
// a sticky error behind; no-op otherwise. 'msg' labels the call site.
void checkCUDAError(const char *msg)
{
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess)
    {
        return;
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
}
| da41c8702ee12edf8a7efd37d94075302e31c777.cu | // includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 2 of 2: implement the fast kernel using shared memory
// Reverse d_in into d_out one block-sized tile at a time: each block loads
// its tile into dynamic shared memory in reversed order, then writes it
// forward into the mirrored block position. Requires blockDim.x * sizeof(int)
// bytes of dynamic shared memory (3rd launch parameter) and a grid that
// exactly covers the array — there is no bounds guard.
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
int inOffset = blockDim.x * blockIdx.x;
int in = inOffset + threadIdx.x;
// Load one element per thread from device memory and store it
// *in reversed order* into temporary shared memory
s_data[blockDim.x - 1 - threadIdx.x] = d_in[in];
// Block until all threads in the block have written their data to shared mem
__syncthreads();
// write the data from shared memory in forward order,
// but to the reversed block offset as before
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int out = outOffset + threadIdx.x;
d_out[out] = s_data[threadIdx.x];
}
////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////
// Allocate a dimA-element int array, reverse it on the GPU with the
// shared-memory kernel above, and verify the result element-by-element
// on the host.
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    //int dimA = 256 * 1024; // 256K elements (1MB total)
    int dimA = 256 * 16; // 4K elements (16KB total); must divide evenly by the block size
    // pointer for device memory
    int *d_b, *d_a;
    // define grid and block size
    int numThreadsPerBlock = 256;
    // Compute number of blocks needed based on array size and desired block size
    int numBlocks = dimA / numThreadsPerBlock;
    // Dynamic shared memory: one int per thread, passed as the 3rd launch
    // parameter below.
    int sharedMemSize = numThreadsPerBlock * sizeof(int);
    // allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc( (void **) &d_a, memSize );
    cudaMalloc( (void **) &d_b, memSize );
    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }
    // Copy host array to device array
    cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );
    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    reverseArrayBlock<<< dimGrid, dimBlock, sharedMemSize >>>( d_b, d_a );
    // Block until the device has completed.
    // FIX: cudaThreadSynchronize() has been deprecated since CUDA 4.0 and is
    // removed from recent toolkits; cudaDeviceSynchronize() is the drop-in
    // replacement with identical semantics here.
    cudaDeviceSynchronize();
    // Check for any CUDA errors from the kernel launch/execution.
    checkCUDAError("kernel invocation");
    // device to host copy (cudaMemcpy is synchronizing)
    cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );
    // Check for any CUDA errors
    checkCUDAError("memcpy");
    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++)
    {
        printf("%d ",h_a[i]);
        assert(h_a[i] == dimA - 1 - i );
    }
    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    // free host memory
    free(h_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
    return 0;
}
// Terminate with a diagnostic if a CUDA error is pending; no-op otherwise.
// 'msg' labels the call site in the error report.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
    {
        return;
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
|
2b6b3f20d34f46ecbc65931f8c4a50e060c59029.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <Hornet.hpp>
#include <Static/CoreNumber/CoreNumber.cuh>
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/error.hpp>
//#include <nvgraph_gdf.h>
namespace cugraph {
namespace detail {
// Compute the k-core number of every vertex of a CSR graph by wrapping
// hornets_nest::CoreNumberStatic; results are written to 'core_number'
// (one int per vertex).
template <typename VT, typename ET, typename WT>
void core_number(legacy::GraphCSRView<VT, ET, WT> const& graph, int* core_number)
{
using HornetGraph = hornet::gpu::HornetStatic<int>;
using HornetInit = hornet::HornetInit<VT>;
using CoreNumber = hornets_nest::CoreNumberStatic;
HornetInit init(graph.number_of_vertices, graph.number_of_edges, graph.offsets, graph.indices);
HornetGraph hnt(init, hornet::DeviceType::DEVICE);
CoreNumber cn(hnt, core_number);
cn.run();
}
// Thrust predicate: keep an edge (src, dst[, w]) iff both endpoints have
// core number >= k. 'core_number' is a device pointer indexed by vertex id.
struct FilterEdges {
int k;
int* core_number;
FilterEdges(int _k, int* d_core_num) : k(_k), core_number(d_core_num) {}
template <typename T>
__host__ __device__ bool operator()(T t)
{
int src = thrust::get<0>(t);
int dst = thrust::get<1>(t);
return (core_number[src] >= k) && (core_number[dst] >= k);
}
};
// Copy every edge of i_graph whose endpoints both satisfy the k-core
// condition into o_graph, carrying weights along when present. o_graph
// must already be sized to exactly the number of surviving edges; a
// mismatch triggers CUGRAPH_FAIL.
template <typename VT, typename ET, typename WT>
void extract_edges(legacy::GraphCOOView<VT, ET, WT> const& i_graph,
legacy::GraphCOOView<VT, ET, WT>& o_graph,
VT* d_core,
int k)
{
hipStream_t stream{nullptr};
// If an edge satisfies k-core conditions i.e. core_num[src] and core_num[dst]
// are both greater than or equal to k, copy it to the output graph
if (i_graph.has_data()) {
// Weighted graph: zip (src, dst, weight) triples.
auto inEdge = thrust::make_zip_iterator(
thrust::make_tuple(i_graph.src_indices, i_graph.dst_indices, i_graph.edge_data));
auto outEdge = thrust::make_zip_iterator(
thrust::make_tuple(o_graph.src_indices, o_graph.dst_indices, o_graph.edge_data));
auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
inEdge,
inEdge + i_graph.number_of_edges,
outEdge,
FilterEdges(k, d_core));
if (thrust::distance(outEdge, ptr) != o_graph.number_of_edges) {
CUGRAPH_FAIL("Edge extraction failed");
}
} else {
// Unweighted graph: zip (src, dst) pairs only.
auto inEdge =
thrust::make_zip_iterator(thrust::make_tuple(i_graph.src_indices, i_graph.dst_indices));
auto outEdge =
thrust::make_zip_iterator(thrust::make_tuple(o_graph.src_indices, o_graph.dst_indices));
auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
inEdge,
inEdge + i_graph.number_of_edges,
outEdge,
FilterEdges(k, d_core));
if (thrust::distance(outEdge, ptr) != o_graph.number_of_edges) {
CUGRAPH_FAIL("Edge extraction failed");
}
}
}
// Extract a subgraph from in_graph (with or without weights)
// to out_graph based on whether edges in in_graph satisfy kcore
// conditions.
// i.e. All edges (s,d,w) in in_graph are copied over to out_graph
// if core_num[s] and core_num[d] are greater than or equal to k.
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> extract_subgraph(
legacy::GraphCOOView<VT, ET, WT> const& in_graph,
int const* vid,
int const* core_num,
int k,
int len,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
hipStream_t stream{nullptr};
// Scatter core_num[i] (given for vertex vid[i]) into a dense array
// indexed directly by vertex id.
rmm::device_vector<VT> sorted_core_num(in_graph.number_of_vertices);
thrust::scatter(
rmm::exec_policy(stream)->on(stream), core_num, core_num + len, vid, sorted_core_num.begin());
VT* d_sorted_core_num = sorted_core_num.data().get();
// Count number of edges in the input graph that satisfy kcore conditions
// i.e. core_num[src] and core_num[dst] are both greater than or equal to k
auto edge =
thrust::make_zip_iterator(thrust::make_tuple(in_graph.src_indices, in_graph.dst_indices));
auto out_graph = std::make_unique<legacy::GraphCOO<VT, ET, WT>>(
in_graph.number_of_vertices,
thrust::count_if(rmm::exec_policy(stream)->on(stream),
edge,
edge + in_graph.number_of_edges,
detail::FilterEdges(k, d_sorted_core_num)),
in_graph.has_data(),
stream,
mr);
legacy::GraphCOOView<VT, ET, WT> out_graph_view = out_graph->view();
extract_edges(in_graph, out_graph_view, d_sorted_core_num, k);
return out_graph;
}
} // namespace detail
// Public entry point: per-vertex core numbers for a CSR graph; thin
// dispatch to detail::core_number.
template <typename VT, typename ET, typename WT>
void core_number(legacy::GraphCSRView<VT, ET, WT> const& graph, VT* core_number)
{
return detail::core_number(graph, core_number);
}
// Extract the k-core subgraph of in_graph: keep edges whose two endpoints
// both have core number >= k. vertex_id / core_number supply precomputed
// core numbers for num_vertex_ids vertices.
// Raises (CUGRAPH_EXPECTS) on null pointers or negative k.
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> k_core(
legacy::GraphCOOView<VT, ET, WT> const& in_graph,
int k,
VT const* vertex_id,
VT const* core_number,
VT num_vertex_ids,
rmm::mr::device_memory_resource* mr)
{
CUGRAPH_EXPECTS(vertex_id != nullptr, "Invalid input argument: vertex_id is NULL");
CUGRAPH_EXPECTS(core_number != nullptr, "Invalid input argument: core_number is NULL");
CUGRAPH_EXPECTS(k >= 0, "Invalid input argument: k must be >= 0");
return detail::extract_subgraph(in_graph, vertex_id, core_number, k, num_vertex_ids, mr);
}
template void core_number<int32_t, int32_t, float>(
legacy::GraphCSRView<int32_t, int32_t, float> const&, int32_t* core_number);
template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, float>> k_core<int32_t, int32_t, float>(
legacy::GraphCOOView<int32_t, int32_t, float> const&,
int,
int32_t const*,
int32_t const*,
int32_t,
rmm::mr::device_memory_resource*);
template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, double>>
k_core<int32_t, int32_t, double>(legacy::GraphCOOView<int32_t, int32_t, double> const&,
int,
int32_t const*,
int32_t const*,
int32_t,
rmm::mr::device_memory_resource*);
} // namespace cugraph
| 2b6b3f20d34f46ecbc65931f8c4a50e060c59029.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <Hornet.hpp>
#include <Static/CoreNumber/CoreNumber.cuh>
#include <cugraph/legacy/graph.hpp>
#include <cugraph/utilities/error.hpp>
//#include <nvgraph_gdf.h>
namespace cugraph {
namespace detail {
// Compute the k-core number of every vertex of a CSR graph by wrapping
// hornets_nest::CoreNumberStatic; results are written to 'core_number'
// (one int per vertex).
template <typename VT, typename ET, typename WT>
void core_number(legacy::GraphCSRView<VT, ET, WT> const& graph, int* core_number)
{
using HornetGraph = hornet::gpu::HornetStatic<int>;
using HornetInit = hornet::HornetInit<VT>;
using CoreNumber = hornets_nest::CoreNumberStatic;
HornetInit init(graph.number_of_vertices, graph.number_of_edges, graph.offsets, graph.indices);
HornetGraph hnt(init, hornet::DeviceType::DEVICE);
CoreNumber cn(hnt, core_number);
cn.run();
}
// Thrust predicate: keep an edge (src, dst[, w]) iff both endpoints have
// core number >= k. 'core_number' is a device pointer indexed by vertex id.
struct FilterEdges {
int k;
int* core_number;
FilterEdges(int _k, int* d_core_num) : k(_k), core_number(d_core_num) {}
template <typename T>
__host__ __device__ bool operator()(T t)
{
int src = thrust::get<0>(t);
int dst = thrust::get<1>(t);
return (core_number[src] >= k) && (core_number[dst] >= k);
}
};
// Copy every edge of i_graph whose endpoints both satisfy the k-core
// condition into o_graph, carrying weights along when present. o_graph
// must already be sized to exactly the number of surviving edges; a
// mismatch triggers CUGRAPH_FAIL.
template <typename VT, typename ET, typename WT>
void extract_edges(legacy::GraphCOOView<VT, ET, WT> const& i_graph,
legacy::GraphCOOView<VT, ET, WT>& o_graph,
VT* d_core,
int k)
{
cudaStream_t stream{nullptr};
// If an edge satisfies k-core conditions i.e. core_num[src] and core_num[dst]
// are both greater than or equal to k, copy it to the output graph
if (i_graph.has_data()) {
// Weighted graph: zip (src, dst, weight) triples.
auto inEdge = thrust::make_zip_iterator(
thrust::make_tuple(i_graph.src_indices, i_graph.dst_indices, i_graph.edge_data));
auto outEdge = thrust::make_zip_iterator(
thrust::make_tuple(o_graph.src_indices, o_graph.dst_indices, o_graph.edge_data));
auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
inEdge,
inEdge + i_graph.number_of_edges,
outEdge,
FilterEdges(k, d_core));
if (thrust::distance(outEdge, ptr) != o_graph.number_of_edges) {
CUGRAPH_FAIL("Edge extraction failed");
}
} else {
// Unweighted graph: zip (src, dst) pairs only.
auto inEdge =
thrust::make_zip_iterator(thrust::make_tuple(i_graph.src_indices, i_graph.dst_indices));
auto outEdge =
thrust::make_zip_iterator(thrust::make_tuple(o_graph.src_indices, o_graph.dst_indices));
auto ptr = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
inEdge,
inEdge + i_graph.number_of_edges,
outEdge,
FilterEdges(k, d_core));
if (thrust::distance(outEdge, ptr) != o_graph.number_of_edges) {
CUGRAPH_FAIL("Edge extraction failed");
}
}
}
// Extract a subgraph from in_graph (with or without weights)
// to out_graph based on whether edges in in_graph satisfy kcore
// conditions.
// i.e. All edges (s,d,w) in in_graph are copied over to out_graph
// if core_num[s] and core_num[d] are greater than or equal to k.
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> extract_subgraph(
legacy::GraphCOOView<VT, ET, WT> const& in_graph,
int const* vid,
int const* core_num,
int k,
int len,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
cudaStream_t stream{nullptr};
// Scatter core_num[i] (given for vertex vid[i]) into a dense array
// indexed directly by vertex id.
rmm::device_vector<VT> sorted_core_num(in_graph.number_of_vertices);
thrust::scatter(
rmm::exec_policy(stream)->on(stream), core_num, core_num + len, vid, sorted_core_num.begin());
VT* d_sorted_core_num = sorted_core_num.data().get();
// Count number of edges in the input graph that satisfy kcore conditions
// i.e. core_num[src] and core_num[dst] are both greater than or equal to k
auto edge =
thrust::make_zip_iterator(thrust::make_tuple(in_graph.src_indices, in_graph.dst_indices));
auto out_graph = std::make_unique<legacy::GraphCOO<VT, ET, WT>>(
in_graph.number_of_vertices,
thrust::count_if(rmm::exec_policy(stream)->on(stream),
edge,
edge + in_graph.number_of_edges,
detail::FilterEdges(k, d_sorted_core_num)),
in_graph.has_data(),
stream,
mr);
legacy::GraphCOOView<VT, ET, WT> out_graph_view = out_graph->view();
extract_edges(in_graph, out_graph_view, d_sorted_core_num, k);
return out_graph;
}
} // namespace detail
// Public entry point: per-vertex core numbers for a CSR graph; thin
// dispatch to detail::core_number.
template <typename VT, typename ET, typename WT>
void core_number(legacy::GraphCSRView<VT, ET, WT> const& graph, VT* core_number)
{
return detail::core_number(graph, core_number);
}
// Extract the k-core subgraph of in_graph: keep edges whose two endpoints
// both have core number >= k. vertex_id / core_number supply precomputed
// core numbers for num_vertex_ids vertices.
// Raises (CUGRAPH_EXPECTS) on null pointers or negative k.
template <typename VT, typename ET, typename WT>
std::unique_ptr<legacy::GraphCOO<VT, ET, WT>> k_core(
legacy::GraphCOOView<VT, ET, WT> const& in_graph,
int k,
VT const* vertex_id,
VT const* core_number,
VT num_vertex_ids,
rmm::mr::device_memory_resource* mr)
{
CUGRAPH_EXPECTS(vertex_id != nullptr, "Invalid input argument: vertex_id is NULL");
CUGRAPH_EXPECTS(core_number != nullptr, "Invalid input argument: core_number is NULL");
CUGRAPH_EXPECTS(k >= 0, "Invalid input argument: k must be >= 0");
return detail::extract_subgraph(in_graph, vertex_id, core_number, k, num_vertex_ids, mr);
}
template void core_number<int32_t, int32_t, float>(
legacy::GraphCSRView<int32_t, int32_t, float> const&, int32_t* core_number);
template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, float>> k_core<int32_t, int32_t, float>(
legacy::GraphCOOView<int32_t, int32_t, float> const&,
int,
int32_t const*,
int32_t const*,
int32_t,
rmm::mr::device_memory_resource*);
template std::unique_ptr<legacy::GraphCOO<int32_t, int32_t, double>>
k_core<int32_t, int32_t, double>(legacy::GraphCOOView<int32_t, int32_t, double> const&,
int,
int32_t const*,
int32_t const*,
int32_t,
rmm::mr::device_memory_resource*);
} // namespace cugraph
|
c1adb969d3f357f84baba1bba6b52264564dad2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "DD_MultiGPU_ker.h"
#include <hip/hip_runtime.h>
#include <vector>
#include <algorithm>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/find.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <omp.h>
#define BACK_BLKX 64
#define BACK_BLKY 4
#define BACK_BLKZ 1
#define BLKX 32
#define BLKY 8
#define BLKZ 1
#ifndef __PI__
#define __PI__
#define PI 3.141592653589793
#define PI_2 1.570796326794897
#define PI_4 0.785398163397448
#define PI_3_4 2.356194490192344
#define PI_5_4 3.926990816987241
#define PI_7_4 5.497787143782138
#define TWOPI 6.283185307179586
#endif
#define FORCEINLINE 1
#if FORCEINLINE
#define INLINE __forceinline__
#else
#define INLINE inline
#endif
#ifndef DEBUG
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Same function as CUDA_CHECK_RETURN
#define CUDA_SAFE_CALL(call) do{ hipError_t err = call; if (hipSuccess != err) { fprintf (stderr, "Cuda error in file '%s' in line %i : %s.", __FILE__, __LINE__, hipGetErrorString(err) ); exit(EXIT_FAILURE); } } while (0)
#else
#define CUDA_CHECK_RETURN(value) {value;}
#define CUDA_SAFE_CALL(value) {value;}
#endif
typedef unsigned char byte;
#ifndef nullptr
#define nullptr NULL
#endif
// ---------------------------------------------------------------------------
// Component-wise vector arithmetic for the CUDA float2/float3/double3/int3
// value types, usable from both host and device code. These mirror the
// helpers commonly provided by helper_math.h.
// ---------------------------------------------------------------------------
INLINE __host__ __device__ const float2 operator/(const float2& a, float b)
{
return make_float2(a.x / b, a.y / b);
}
INLINE __host__ __device__ const float3 operator+(const float3& a, const float3& b)
{
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
INLINE __host__ __device__ const float3 operator-(const float3& a, const float3& b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
INLINE __host__ __device__ const float2 operator-(const float2& a, const float2& b)
{
return make_float2(a.x - b.x, a.y - b.y);
}
INLINE __host__ __device__ const float3 operator*(const float3& a, const float3& b)
{
return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);
}
INLINE __host__ __device__ const float3 operator*(const float3& a, float b)
{
return make_float3(a.x * b, a.y * b, a.z * b);
}
INLINE __host__ __device__ const float3 operator/(const float3& a, const float3& b)
{
return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);
}
INLINE __host__ __device__ const float3 operator/(const float3& a, float b)
{
return make_float3(a.x / b, a.y / b, a.z / b);
}
INLINE __host__ __device__ const double3 operator/(const double3& a, double b)
{
return make_double3(a.x / b, a.y / b, a.z / b);
}
INLINE __host__ __device__ const float3 operator-(const int3& a, const float3& b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
// Euclidean norms.
INLINE __host__ __device__ float length(const float2& a)
{
return sqrtf(a.x * a.x + a.y * a.y);
}
INLINE __host__ __device__ float length(const float3& a)
{
return sqrtf(a.x * a.x + a.y * a.y + a.z * a.z);
}
INLINE __host__ __device__ double length(const double3& a)
{
return sqrt(a.x * a.x + a.y * a.y + a.z * a.z);
}
// Unit vectors (no guard against zero-length input).
INLINE __host__ __device__ const float2 normalize(const float2& a)
{
return a / length(a);
}
INLINE __host__ __device__ const float3 normalize(const float3& a)
{
return a / length(a);
}
INLINE __host__ __device__ const double3 normalize(const double3& a)
{
return a / length(a);
}
// Horizontal min/max over a vector's components.
INLINE __host__ __device__ float fminf(const float2& a)
{
return fminf(a.x, a.y);
}
INLINE __host__ __device__ float fminf(const float3& a)
{
return fminf(a.x, fminf(a.y, a.z));
}
INLINE __host__ __device__ float fmaxf(const float2& a)
{
return fmaxf(a.x, a.y);
}
INLINE __host__ __device__ float fmaxf(const float3& a)
{
return fmaxf(a.x, fmaxf(a.y, a.z));
}
// Component-wise min/max between two vectors.
INLINE __host__ __device__ const float3 fminf(const float3& a, const float3& b)
{
return make_float3(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z));
}
INLINE __host__ __device__ const float3 fmaxf(const float3& a, const float3& b)
{
return make_float3(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z));
}
INLINE __host__ __device__ const float2 fminf(const float2& a, const float2& b)
{
return make_float2(fminf(a.x, b.x), fminf(a.y, b.y));
}
INLINE __host__ __device__ const float2 fmaxf(const float2& a, const float2& b)
{
return make_float2(fmaxf(a.x, b.x), fmaxf(a.y, b.y));
}
// Ray/axis-aligned-box intersection via the slab method: returns true when
// the ray sour + t * dir crosses [boxmin, boxmax]; *tnear/*tfar receive the
// entry and exit parameters (tnear may be negative when 'sour' is inside).
// NOTE(review): relies on IEEE infinity semantics when a dir component is 0.
INLINE __host__ __device__ bool intersectBox(
const float3& sour,
const float3& dir,
const float3& boxmin,
const float3& boxmax,
float* tnear, float* tfar)
{
const float3 invR = make_float3(1.0 / dir.x, 1.0 / dir.y, 1.0 / dir.z);
const float3 tbot = invR * (boxmin - sour);
const float3 ttop = invR * (boxmax - sour);
// Per-axis entry/exit times, then the tightest interval across axes.
const float3 tmin = fminf(ttop, tbot);
const float3 tmax = fmaxf(ttop, tbot);
const float largest_tmin = fmaxf(tmin);
const float smallest_tmax = fminf(tmax);
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
template<typename T>
INLINE __host__ __device__ T regularizeAngle(T curang)
{
    // Wrap an angle into [0, TWOPI) by repeated subtraction/addition.
    T wrapped = curang;
    while (wrapped >= TWOPI)
    {
        wrapped -= TWOPI;
    }
    while (wrapped < 0)
    {
        wrapped += TWOPI;
    }
    return wrapped;
}
INLINE __host__ __device__ void invRotVox(
    const float3& curVox,
    float3& virVox,
    const float2& cossinT,
    const float zP)
{
    // Map a voxel into the rotated ("virtual") frame: apply the in-plane
    // rotation [c s; -s c] (cossinT = (cos, sin)) and shift z by -zP.
    const float c = cossinT.x;
    const float s = cossinT.y;
    virVox.x = curVox.x * c + curVox.y * s;
    virVox.y = curVox.y * c - curVox.x * s;
    virVox.z = curVox.z - zP;
}
INLINE __device__ float3 invRot(
    const float3 inV,
    const float2 cossin,
    const float zP)
{
    // Inverse in-plane rotation plus z shift, mirroring invRotVox.
    // Fix: the original assigned the second rotation row to outV.x again,
    // leaving outV.y uninitialized and clobbering outV.x.
    float3 outV;
    outV.x = inV.x * cossin.x + inV.y * cossin.y;
    outV.y = -inV.x * cossin.y + inV.y * cossin.x;
    outV.z = inV.z - zP;
    return outV;
}
namespace CTMBIR
{
// Functor that turns a (projection angle, source z-position) tuple into the
// per-view constants (cos, sin, z) consumed by the projection kernels.
// x0/y0/z0 are stored but not used by operator() — presumably kept for
// interface parity with sibling functors; confirm before removing.
struct ConstantForBackProjection4{
float x0;
float y0;
float z0;
typedef thrust::tuple<float, float> InTuple;
ConstantForBackProjection4(const float _x0, const float _y0, const float _z0)
: x0(_x0), y0(_y0), z0(_z0){}
// Returns (cos(angle), sin(angle), zPos) with the angle wrapped into [0, 2*pi).
__device__ float3 operator()(const InTuple& tp)
{
float curang = regularizeAngle(thrust::get<0>(tp));
float zP = thrust::get<1>(tp);
float cosT = cosf(curang);
float sinT = sinf(curang);
return make_float3(cosT, sinT, zP);
}
};
}
template<typename T>
void DD3Boundaries(int nrBoundaries, T*pCenters, T *pBoundaries)
{
    // Convert nrBoundaries-1 cell-center coordinates into nrBoundaries cell
    // boundaries: interior boundaries are midpoints of adjacent centers and
    // the two outer boundaries are extrapolated half a cell beyond the ends.
    if (nrBoundaries >= 3)
    {
        pBoundaries[0] = 1.5 * pCenters[0] - 0.5 * pCenters[1];
        for (int i = 1; i <= nrBoundaries - 2; ++i)
        {
            pBoundaries[i] = 0.5 * pCenters[i - 1] + 0.5 * pCenters[i];
        }
        pBoundaries[nrBoundaries - 1] =
            1.5 * pCenters[nrBoundaries - 2] - 0.5 * pCenters[nrBoundaries - 3];
    }
    else
    {
        // Degenerate case: a single center yields unit-width boundaries.
        pBoundaries[0] = pCenters[0] - 0.5;
        pBoundaries[1] = pCenters[0] + 0.5;
    }
}
template<typename T>
void DD3Boundaries(int nrBoundaries, std::vector<T>& pC, T *pBoundaries)
{
    // Convert nrBoundaries-1 cell-center coordinates (in a std::vector) into
    // nrBoundaries cell boundaries; see the raw-pointer overload.
    // Fix: the original dereferenced/incremented the vector itself
    // (*pCenters, pCenters++), which does not compile when instantiated.
    T* pCenters = &pC[0];
    int i;
    if (nrBoundaries >= 3)
    {
        // Left boundary extrapolated half a cell beyond the first center.
        *pBoundaries++ = 1.5 * *pCenters - 0.5 * *(pCenters + 1);
        // Interior boundaries: midpoints of adjacent centers.
        for (i = 1; i <= (nrBoundaries - 2); i++)
        {
            *pBoundaries++ = 0.5 * *pCenters + 0.5 * *(pCenters + 1);
            pCenters++;
        }
        // Right boundary extrapolated half a cell beyond the last center.
        *pBoundaries = 1.5 * *pCenters - 0.5 * *(pCenters - 1);
    }
    else
    {
        // Degenerate case: a single center yields unit-width boundaries.
        *pBoundaries = *pCenters - 0.5;
        *(pBoundaries + 1) = *pCenters + 0.5;
    }
}
template<typename T>
void DD3Boundaries(int nrBoundaries,T *pCenters, std::vector<T>& pB)
{
    // Same center-to-boundary conversion as the raw-pointer overload, with
    // the output written into a std::vector.
    T* pBoundaries = &pB[0];
    if (nrBoundaries >= 3)
    {
        pBoundaries[0] = 1.5 * pCenters[0] - 0.5 * pCenters[1];
        for (int i = 1; i <= nrBoundaries - 2; ++i)
        {
            pBoundaries[i] = 0.5 * pCenters[i - 1] + 0.5 * pCenters[i];
        }
        pBoundaries[nrBoundaries - 1] =
            1.5 * pCenters[nrBoundaries - 2] - 0.5 * pCenters[nrBoundaries - 3];
    }
    else
    {
        pBoundaries[0] = pCenters[0] - 0.5;
        pBoundaries[1] = pCenters[0] + 0.5;
    }
}
template<typename T>
void DD3Boundaries(int nrBoundaries,std::vector<T>& pC, std::vector<T>& pB)
{
    // Same center-to-boundary conversion, operating on two std::vectors.
    T* pCenters = &pC[0];
    T* pBoundaries = &pB[0];
    if (nrBoundaries >= 3)
    {
        pBoundaries[0] = 1.5 * pCenters[0] - 0.5 * pCenters[1];
        for (int i = 1; i <= nrBoundaries - 2; ++i)
        {
            pBoundaries[i] = 0.5 * pCenters[i - 1] + 0.5 * pCenters[i];
        }
        pBoundaries[nrBoundaries - 1] =
            1.5 * pCenters[nrBoundaries - 2] - 0.5 * pCenters[nrBoundaries - 3];
    }
    else
    {
        pBoundaries[0] = pCenters[0] - 0.5;
        pBoundaries[1] = pCenters[0] + 0.5;
    }
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
// Get one sub-volume from the whole volume.
// Assume that the volumes are stored in Z, X, Y order
template<typename T>
void getSubVolume(const T* vol,
    const size_t XN, const size_t YN, const size_t ZN,
    const size_t ZIdx_Start, const size_t ZIdx_End, T* subVol)
{
    // Copy the Z-slice range [ZIdx_Start, ZIdx_End) out of a volume stored
    // Z-fastest, then X, then Y, into a contiguous sub-volume.
    const size_t SZN = ZIdx_End - ZIdx_Start;
    for (size_t yIdx = 0; yIdx < YN; ++yIdx)
    {
        for (size_t xIdx = 0; xIdx < XN; ++xIdx)
        {
            const size_t col = yIdx * XN + xIdx;
            const T* src = vol + col * ZN + ZIdx_Start;
            T* dst = subVol + col * SZN;
            for (size_t k = 0; k < SZN; ++k)
            {
                dst[k] = src[k];
            }
        }
    }
}
template<typename T>
void getSubVolume(const T* vol,
    const size_t XYN, const size_t ZN,
    const size_t ZIdx_Start, const size_t ZIdx_End, T* subVol)
{
    // Flattened-XY variant: copy Z slices [ZIdx_Start, ZIdx_End) for each of
    // the XYN columns of a Z-fastest volume.
    const size_t SZN = ZIdx_End - ZIdx_Start;
    for (size_t xyIdx = 0; xyIdx < XYN; ++xyIdx)
    {
        const T* src = vol + xyIdx * ZN + ZIdx_Start;
        T* dst = subVol + xyIdx * SZN;
        for (size_t k = 0; k < SZN; ++k)
        {
            dst[k] = src[k];
        }
    }
}
///////////////////////////////////////////////////////////////////////////////////
// For projection, before we divide the volume into serveral sub-volumes, we have
// to calculate the Z index range
// Compute the half-open volume Z-slice range [ObjIdx_Start, ObjIdx_End) that
// can be illuminated by projections [PrjIdx_Start, PrjIdx_End).
// The detector spans lowerPart below its center row and upperPart above it;
// the source z positions at the first/last view plus those extents give the
// physical z span, which is converted to slice indices (with one extra slice
// of padding on each side) and clamped to [0, ZN].
template<typename T>
void getVolZIdxPair(const thrust::host_vector<T>& zPos, // Z position of the source.
//NOTE: We only assume the spiral CT case that zPos is increasing.
const size_t PrjIdx_Start, const size_t PrjIdx_End,
const T detCntIdxV, const T detStpZ, const int DNV,
const T objCntIdxZ, const T dz, const int ZN, // Size of the volume
int& ObjIdx_Start, int& ObjIdx_End) // The end is not included
{
// Physical detector extent below / above the source z position.
const T lowerPart = (detCntIdxV + 0.5) * detStpZ;
const T upperPart = DNV * detStpZ - lowerPart;
// Lowest / highest physical z touched by the requested view range.
const T startPos = zPos[PrjIdx_Start] - lowerPart;
const T endPos = zPos[PrjIdx_End - 1] + upperPart;
// Convert physical z to slice indices, padding one slice on each side.
ObjIdx_Start = floor((startPos / dz) + objCntIdxZ - 1);
ObjIdx_End = ceil((endPos / dz) + objCntIdxZ + 1) + 1;
// Clamp both indices into [0, ZN].
ObjIdx_Start = (ObjIdx_Start < 0) ? 0 : ObjIdx_Start;
ObjIdx_Start = (ObjIdx_Start > ZN) ? ZN : ObjIdx_Start;
ObjIdx_End = (ObjIdx_End < 0) ? 0 : ObjIdx_End;
ObjIdx_End = (ObjIdx_End > ZN) ? ZN : ObjIdx_End;
}
///////////////////////////////////////////////////////////////////////////////////
// For backprojection, after decide the subvolume range, we have to decide the
// projection range to cover the subvolume.
// Inverse of getVolZIdxPair: given a sub-volume Z-slice range
// [ObjZIdx_Start, ObjZIdx_End), find the half-open projection index range
// [prjIdx_Start, prjIdx_End) whose rays can touch that slab.
template<typename T>
void getPrjIdxPair(const thrust::host_vector<T>& zPos, // Z Position of the source.
// NOTE: we assume that it is pre sorted
const size_t ObjZIdx_Start, const size_t ObjZIdx_End, // sub vol range,
// NOTE: the objZIdx_End is not included
const T objCntIdxZ, const T dz, const int ZN,
const T detCntIdxV, const T detStpZ, const int DNV,
int& prjIdx_Start, int& prjIdx_End)
{
const int PN = zPos.size();
// Physical z of the bottom and top of the sub-volume slab.
const T lowerPartV = (ObjZIdx_Start - objCntIdxZ - 0.5) * dz;
const T highrPartV = lowerPartV + (ObjZIdx_End - ObjZIdx_Start) * dz;
// Detector extent below / above the source z position.
const T lowerPartDet = (detCntIdxV + 0.5) * detStpZ;
const T upperPartDet = DNV * detStpZ - lowerPartDet;
// Source z range whose detector can still see the slab.
const T sourLPos = lowerPartV - upperPartDet;
const T sourHPos = highrPartV + lowerPartDet;
// Binary-search the sorted source positions; pad by one view at the start
// and two at the end, then clamp into [0, PN].
prjIdx_Start = thrust::upper_bound(zPos.begin(),zPos.end(),sourLPos) - zPos.begin() - 1;
prjIdx_End = thrust::upper_bound(zPos.begin(),zPos.end(),sourHPos) - zPos.begin() + 2;
prjIdx_Start = (prjIdx_Start < 0) ? 0 : prjIdx_Start;
prjIdx_Start = (prjIdx_Start > PN)? PN: prjIdx_Start;
prjIdx_End = (prjIdx_End < 0) ? 0 : prjIdx_End;
prjIdx_End = (prjIdx_End > PN) ? PN : prjIdx_End;
}
////////////////////////////////////////////////////////////////////////////////////
// The volume is also stored in Z, X, Y order
// Not tested yet.
template<typename T>
void combineVolume(
    T* vol, // The volume to be combined
    const int XN, const int YN, const int ZN,
    T** subVol, // All sub volumes
    const int* SZN, // Number of slices for each subVolume
    const int subVolNum) // Number of sub volumes
{
    // Stitch subVolNum Z-slabs back into one Z-fastest volume: for every
    // (x, y) column, the slabs' slices are appended in order along Z.
    for (int yIdx = 0; yIdx < YN; ++yIdx)
    {
        for (int xIdx = 0; xIdx < XN; ++xIdx)
        {
            const int col = yIdx * XN + xIdx;
            int zOut = 0; // write position along Z in the combined volume
            for (int volIdx = 0; volIdx < subVolNum; ++volIdx)
            {
                for (int zIdx = 0; zIdx < SZN[volIdx]; ++zIdx)
                {
                    vol[col * ZN + zOut] = subVol[volIdx][col * SZN[volIdx] + zIdx];
                    ++zOut;
                }
            }
        }
    }
}
// Overload of combineVolume taking the sub-volumes as nested thrust host
// vectors: for every (x, y) column, the slabs' Z slices are concatenated in
// slab order into the Z-fastest output volume.
// NOTE(review): the inner vectors are hard-coded to float, so T is only used
// for the output pointer — confirm whether this should be host_vector<T>.
template<typename T>
void combineVolume(
T* vol, // The volume to be combined
const int XN, const int YN, const int ZN,
thrust::host_vector<thrust::host_vector<float> >& subVol, // All sub volumes
const int* SZN, // Number of slices for each subVolume
const int subVolNum) // Number of sub volumes
{
// kk is the running output Z index, reset for every (x, y) column.
int kk = 0;
for (size_t yIdx = 0; yIdx != YN; ++yIdx)
{
for (size_t xIdx = 0; xIdx != XN; ++xIdx)
{
kk = 0;
for (size_t volIdx = 0; volIdx != subVolNum; ++volIdx)
{
for (size_t zIdx = 0; zIdx != SZN[volIdx]; ++zIdx)
{
vol[(yIdx * XN + xIdx) * ZN + kk] = subVol[volIdx][(yIdx * XN + xIdx) * SZN[volIdx] + zIdx];
kk = kk + 1;
}
}
}
}
}
template<typename T>
std::vector<T> operator-(const std::vector<T>& a, const std::vector<T>& b)
{
    // Element-wise difference a[i] - b[i]; b must have at least a.size()
    // elements.
    std::vector<T> diff(a.size(), 0);
    for (std::size_t k = 0; k < a.size(); ++k)
    {
        diff[k] = a[k] - b[k];
    }
    return diff;
}
template<typename T>
__device__ inline T intersectLength(const T& fixedmin, const T& fixedmax, const T& varimin, const T& varimax)
{
    // Length of the overlap of [fixedmin, fixedmax] and [varimin, varimax];
    // zero when the intervals are disjoint.
    const T lo = (varimin > fixedmin) ? varimin : fixedmin;
    const T hi = (varimax < fixedmax) ? varimax : fixedmax;
    return (hi > lo) ? (hi - lo) : T(0);
}
// Copy one Z-fastest volume into two padded copies used to build
// summed-area tables: out_ZXY is laid out (Z, X, Y) with a one-cell border in
// Z and X, out_ZYX is laid out (Z, Y, X) with a one-cell border in Z and Y
// (padded sizes follow from the caller's (ZN+1, XN+1, YN) / (ZN+1, YN+1, XN)
// allocations). The border cells are assumed to be pre-zeroed by the caller
// — TODO confirm.
// Thread mapping: x -> z, y -> x, z -> y; one thread per voxel.
template<typename Ta, typename Tb>
__global__ void naive_copyToTwoVolumes(Ta* in_ZXY,
Tb* out_ZXY, Tb* out_ZYX,
int XN, int YN, int ZN)
{
int idz = threadIdx.x + blockIdx.x * blockDim.x;
int idx = threadIdx.y + blockIdx.y * blockDim.y;
int idy = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < XN && idy < YN && idz < ZN)
{
// Source index in the unpadded (Z, X, Y) volume.
int i = (idy * XN + idx) * ZN + idz;
// Destination indices, shifted by +1 in the padded dimensions.
int ni = (idy * (XN + 1) + (idx + 1)) * (ZN + 1) + idz + 1;
int nj = (idx * (YN + 1) + (idy + 1)) * (ZN + 1) + idz + 1;
out_ZXY[ni] = in_ZXY[i];
out_ZYX[nj] = in_ZXY[i];
}
}
template<typename Ta, typename Tb>
__global__ void naive_herizontalIntegral(Ta* in, Tb* out, int N, int ZN)
{
    // Inclusive prefix sum across the slow (stride-ZN) dimension:
    // out[i][zi] = sum of in[0..i][zi]. One thread per z index.
    int zi = threadIdx.x + blockIdx.x * blockDim.x;
    if (zi >= ZN)
    {
        return;
    }
    Tb acc = in[zi];
    out[zi] = acc;
    for (int i = 1; i < N; ++i)
    {
        acc = acc + in[i * ZN + zi];
        out[i * ZN + zi] = acc;
    }
}
template<typename Ta, typename Tb>
__global__ void naive_verticalIntegral(Ta* in, Tb* out, int N, int ZN)
{
    // Inclusive prefix sum along the contiguous (Z) dimension of each of the
    // N columns: out[xyi][ii] = sum of in[xyi][0..ii]. One thread per column.
    int xyi = threadIdx.x + blockIdx.x * blockDim.x;
    if (xyi >= N)
    {
        return;
    }
    const int base = xyi * ZN;
    Tb acc = in[base];
    out[base] = acc;
    for (int ii = 1; ii < ZN; ++ii)
    {
        acc = acc + in[base + ii];
        out[base + ii] = acc;
    }
}
template<typename T>
__global__ void verticalIntegral(T* prj, int ZN, int N)
{
    // In-place inclusive prefix sum along the contiguous (length-ZN)
    // dimension of each of the N columns. One thread per column.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
    {
        return;
    }
    const int base = idx * ZN;
    T acc = prj[base];
    for (int ii = 1; ii < ZN; ++ii)
    {
        acc = acc + prj[base + ii];
        prj[base + ii] = acc;
    }
}
template<typename T>
__global__ void horizontalIntegral(T* prj, int DNU, int DNV, int PN)
{
    // In-place inclusive prefix sum across the U (stride-DNV) dimension of
    // each DNU x DNV view. One thread per (v, view) pair.
    int idv = threadIdx.x + blockIdx.x * blockDim.x;
    int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
    if (idv >= DNV || pIdx >= PN)
    {
        return;
    }
    const int base = pIdx * DNU * DNV + idv;
    T acc = prj[base];
    for (int ii = 1; ii < DNU; ++ii)
    {
        acc = acc + prj[base + ii * DNV];
        prj[base + ii * DNV] = acc;
    }
}
__global__ void naive_vertialIntegral(double* in, int2* out, int N, int ZN)
{
    // Inclusive prefix sum along Z for each of the N columns, with each
    // running double packed into an int2 as (low 32 bits, high 32 bits).
    // Fix: the loop previously started at ii = 0, which added the first
    // element twice (the sibling prefix-sum kernels all start at ii = 1).
    int xyi = threadIdx.x + blockIdx.x * blockDim.x;
    if (xyi < N)
    {
        double acc = in[xyi * ZN];
        out[xyi * ZN] = make_int2(__double2loint(acc), __double2hiint(acc));
        for (int ii = 1; ii < ZN; ++ii)
        {
            acc += in[xyi * ZN + ii];
            out[xyi * ZN + ii] = make_int2(__double2loint(acc), __double2hiint(acc));
        }
    }
}
__global__ void verticalIntegral(float* prj, int ZN, int N)
{
    // In-place inclusive prefix sum along the contiguous (length-ZN)
    // dimension of each of the N columns (float specialization).
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
    {
        return;
    }
    const int base = idx * ZN;
    float acc = prj[base];
    for (int ii = 1; ii < ZN; ++ii)
    {
        acc = acc + prj[base + ii];
        prj[base + ii] = acc;
    }
}
__global__ void horizontalIntegral(float* prj, int DNU, int DNV, int PN)
{
    // In-place inclusive prefix sum across the U (stride-DNV) dimension of
    // each DNU x DNV view (float specialization).
    int idv = threadIdx.x + blockIdx.x * blockDim.x;
    int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
    if (idv >= DNV || pIdx >= PN)
    {
        return;
    }
    const int base = pIdx * DNU * DNV + idv;
    float acc = prj[base];
    for (int ii = 1; ii < DNU; ++ii)
    {
        acc = acc + prj[base + ii * DNV];
        prj[base + ii * DNV] = acc;
    }
}
__global__ void DD3_gpu_proj_branchless_sat2d_ker(
    hipTextureObject_t volTex1, // SAT of the (Z, Y, X)-ordered padded volume
    hipTextureObject_t volTex2, // SAT of the (Z, X, Y)-ordered padded volume
    float* proj,                // output projections, laid out [PN][DNU][DNV]
    float3 s,                   // source position in the unrotated frame
    const float3* __restrict__ cossinZT, // per-view (cos, sin, z-shift)
    const float* __restrict__ xds,
    const float* __restrict__ yds,
    const float* __restrict__ zds,
    const float* __restrict__ bxds,  // detector cell boundaries (DNU+1 / DNV+1 entries)
    const float* __restrict__ byds,
    const float* __restrict__ bzds,
    float3 objCntIdx,
    float dx, float dz,
    int XN, int YN, int ZN,
    int DNU, int DNV, int PN)
{
    // Branchless DD3 forward projection using summed-area tables: for every
    // (detector row V, channel U, view) the detector cell footprint is swept
    // across the major axis and the box integral is read back as four SAT
    // corner samples.
    // Thread mapping: x -> V, y -> U, z -> view. Requires blockDim.y == BLKY.
    int detIdV = threadIdx.x + blockIdx.x * blockDim.x;
    int detIdU = threadIdx.y + blockIdx.y * blockDim.y;
    int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
    // Stage this block's detector channel coordinates in shared memory.
    // Fix: guard the gather — threads whose detIdU falls past the last
    // channel previously read xds/yds out of bounds. Every thread still
    // reaches the barrier below.
    __shared__ float _xds[BLKY];
    __shared__ float _yds[BLKY];
    if (detIdU < DNU)
    {
        _xds[threadIdx.y] = xds[detIdU];
        _yds[threadIdx.y] = yds[detIdU];
    }
    __syncthreads();
    if (detIdU < DNU && detIdV < DNV && angIdx < PN)
    {
        // Source rotated into the current view.
        float3 dir = cossinZT[angIdx];
        float3 cursour = make_float3(
            s.x * dir.x - s.y * dir.y,
            s.x * dir.y + s.y * dir.x,
            s.z + dir.z);
        s = cossinZT[angIdx]; // s is reused as (cos, sin, z-shift) from here on
        // Rotated detector cell center (x, y); summ/obj are reused as scratch.
        float summ = _xds[threadIdx.y] * s.x - _yds[threadIdx.y] * s.y;
        float obj = _xds[threadIdx.y] * s.y + _yds[threadIdx.y] * s.x;
        // Rotated left/right cell boundaries in the XY plane, and the cell's
        // lower/upper z boundaries.
        float realL = bxds[detIdU];
        float realR = byds[detIdU];
        float realU = bxds[detIdU + 1];
        float realD = byds[detIdU + 1];
        float2 curDetL = make_float2(
            realL * s.x - realR * s.y,
            realL * s.y + realR * s.x);
        float2 curDetR = make_float2(
            realU * s.x - realD * s.y,
            realU * s.y + realD * s.x);
        float4 curDet = make_float4(
            summ, obj, bzds[detIdV] + s.z,
            bzds[detIdV + 1] + s.z);
        // Central ray of this detector cell.
        dir = normalize(make_float3(summ, obj,
            zds[detIdV] + s.z) - cursour);
        summ = 0;
        obj = 0;
        float intersectLength, intersectHeight;
        // Fix: float literals to avoid double-precision divisions.
        float invdz = 1.0f / dz;
        float invdx = 1.0f / dx;
        float factL(1.0f);
        float factR(1.0f);
        float factU(1.0f);
        float factD(1.0f);
        float constVal = 0;
        // NOTE: the original called __syncthreads() before each proj store,
        // inside this divergent guard; the barriers protected nothing and
        // are undefined behavior with divergent threads, so they were removed.
        if (fabsf(s.x) <= fabsf(s.y))
        {
            // X-major sweep: march across X planes, sampling the (z, y) SAT.
            summ = 0;
            factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x);
            factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x);
            factU = (curDet.w - cursour.z) / (curDet.x - cursour.x);
            factD = (curDet.z - cursour.z) / (curDet.x - cursour.x);
            constVal = dx * dx * dz / fabsf(dir.x);
#pragma unroll
            for (int ii = 0; ii < XN; ++ii)
            {
                obj = (ii - objCntIdx.x) * dx;
                // Footprint of the cell projected onto this X plane.
                realL = (obj - curDetL.x) * factL + curDetL.y;
                realR = (obj - curDetR.x) * factR + curDetR.y;
                realU = (obj - curDet.x) * factU + curDet.w;
                realD = (obj - curDet.x) * factD + curDet.z;
                intersectLength = realR - realL;
                intersectHeight = realU - realD;
                // Convert to padded SAT texture coordinates.
                realD = realD * invdz + objCntIdx.z + 1;
                realR = realR * invdx + objCntIdx.y + 1;
                realU = realU * invdz + objCntIdx.z + 1;
                realL = realL * invdx + objCntIdx.y + 1;
                // Four-corner SAT difference = box integral over the footprint.
                summ +=
                    (
                    tex3D<float>(volTex2, realD, realL, ii + 0.5)
                    + tex3D<float>(volTex2, realU, realR, ii + 0.5)
                    - tex3D<float>(volTex2, realU, realL, ii + 0.5)
                    - tex3D<float>(volTex2, realD, realR, ii + 0.5)
                    ) / (intersectLength * intersectHeight);
            }
            proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * constVal;
        }
        else
        {
            // Y-major sweep: march across Y planes, sampling the (z, x) SAT.
            summ = 0;
            factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y);
            factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y);
            factU = (curDet.w - cursour.z) / (curDet.y - cursour.y);
            factD = (curDet.z - cursour.z) / (curDet.y - cursour.y);
            constVal = dx * dx * dz / fabsf(dir.y);
#pragma unroll
            for (int jj = 0; jj < YN; ++jj)
            {
                obj = (jj - objCntIdx.y) * dx;
                realL = (obj - curDetL.y) * factL + curDetL.x;
                realR = (obj - curDetR.y) * factR + curDetR.x;
                realU = (obj - curDet.y) * factU + curDet.w;
                realD = (obj - curDet.y) * factD + curDet.z;
                intersectLength = realR - realL;
                intersectHeight = realU - realD;
                realD = realD * invdz + objCntIdx.z + 1;
                realR = realR * invdx + objCntIdx.x + 1;
                realU = realU * invdz + objCntIdx.z + 1;
                realL = realL * invdx + objCntIdx.x + 1;
                summ +=
                    (
                    tex3D<float>(volTex1, realD, realL, jj + 0.5)
                    + tex3D<float>(volTex1, realU, realR, jj + 0.5)
                    - tex3D<float>(volTex1, realU, realL, jj + 0.5)
                    - tex3D<float>(volTex1, realD, realR, jj + 0.5)
                    ) / (intersectLength * intersectHeight);
            }
            proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * constVal;
        }
    }
}
__global__ void DD3_gpu_proj_pseudodistancedriven_ker(
    hipTextureObject_t volTex,
    float* proj, float3 s,
    float* d_xds, float* d_yds, float* d_zds,
    float3* cossinT,
    float3 objCntIdx,
    float dx, float dz,
    int XN, int YN,
    int DNU, int DNV, int PN)
{
    // Pseudo distance-driven forward projection: one thread per
    // (detector row V, channel U, view). A single ray from the rotated
    // source to the rotated detector-cell center is marched plane-by-plane
    // through the volume texture along the major in-plane axis.
    // Output layout: proj[(view * DNU + U) * DNV + V].
    // Fix: the original called __syncthreads() before each proj store,
    // inside the divergent guard below; this kernel uses no shared memory,
    // so the barriers were useless and (being in divergent control flow)
    // undefined behavior — removed.
    int detIdV = threadIdx.x + blockIdx.x * blockDim.x;
    int detIdU = threadIdx.y + blockIdx.y * blockDim.y;
    int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
    if (detIdV < DNV && detIdU < DNU && angIdx < PN)
    {
        float3 cossin = cossinT[angIdx];
        // Source rotated into the current view.
        float3 cursour = make_float3(
            s.x * cossin.x - s.y * cossin.y,
            s.x * cossin.y + s.y * cossin.x,
            s.z + cossin.z);
        // summ/obj/idx are reused as scratch throughout.
        float summ = d_xds[detIdU];
        float obj = d_yds[detIdU];
        float idx = d_zds[detIdV];
        // Detector cell center rotated into the current view.
        float3 curDet = make_float3(
            summ * cossin.x - obj * cossin.y,
            summ * cossin.y + obj * cossin.x,
            idx + cossin.z);
        float3 dir = normalize(curDet - cursour);
        summ = 0;
        obj = 0;
        float idxZ;
        if (fabsf(cossin.x) <= fabsf(cossin.y))
        {
            // March across X planes, sampling (y, z) at each plane.
            summ = 0;
            for (int ii = 0; ii < XN; ++ii)
            {
                obj = (ii - objCntIdx.x) * dx;
                idx = (obj - curDet.x) / dir.x * dir.y + curDet.y;
                idxZ = (obj - curDet.x) / dir.x * dir.z + curDet.z;
                idx = idx / dx + objCntIdx.y + 0.5;
                idxZ = idxZ / dz + objCntIdx.z + 0.5;
                summ += tex3D<float>(volTex, idxZ, ii + 0.5f, idx);
            }
            // Scale the plane-by-plane sum by the ray length per X step.
            proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * dx / fabsf(dir.x);
        }
        else
        {
            // March across Y planes, sampling (x, z) at each plane.
            summ = 0;
            for (int jj = 0; jj != YN; ++jj)
            {
                obj = (jj - objCntIdx.y) * dx;
                idx = (obj - curDet.y) / dir.y * dir.x + curDet.x;
                idxZ = (obj - curDet.y) / dir.y * dir.z + curDet.z;
                idx = idx / dx + objCntIdx.x + 0.5;
                idxZ = idxZ / dz + objCntIdx.z + 0.5;
                summ += tex3D<float>(volTex, idxZ, idx, jj + 0.5f);
            }
            proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * dx / fabsf(dir.y);
        }
    }
}
////Use the split-collect method to do the projection
//void DD3ProjHelical_3GPU(
// float x0, float y0, float z0,
// int DNU, int DNV,
// float* xds, float* yds, float* zds,
// float imgXCenter, float imgYCenter, float imgZCenter,
// float* hangs, float* hzPos, int PN,
// int XN, int YN, int ZN,
// float* hvol, float* hprj,
// float dx, float dz,
// byte* mask, int methodId, int (&startPN)[3])
//{
//
//}
// Divide three sub volumes.
template<typename T>
void GenSubVols(
    int* ObjIdx_Start,
    int* ObjIdx_End,
    int* SZN,
    T** subVol,
    T* subImgZCenter,
    const int subVolN,
    const int* PrjIdx_Start,
    const int* PrjIdx_End,
    const T detCntIdxV,
    const T detStpZ,
    const T objCntIdxZ,
    const T dz,
    const int ZN,
    const int DNV,
    const T imgZCenter,
    const int PN,
    const int XN,
    const int YN,
    const T* hvol,
    const T* hzPos)
{
    // Split the volume into subVolN overlapping Z-slabs, one per projection
    // index range [PrjIdx_Start[i], PrjIdx_End[i]), and compute each slab's
    // center in physical z.
    //
    // NOTE(review): the output pointers are received BY VALUE, so the
    // fallback allocations below never reach the caller (and leak if the
    // caller passed nullptr). Callers must pass pre-allocated arrays;
    // confirm the intended contract — these should probably be references.
    if (nullptr == ObjIdx_Start)
    {
        ObjIdx_Start = new int[subVolN];
    }
    if (nullptr == ObjIdx_End)
    {
        ObjIdx_End = new int[subVolN];
    }
    if (nullptr == SZN)
    {
        SZN = new int[subVolN];
    }
    if (nullptr == subVol)
    {
        subVol = new T*[subVolN]; // fix: was `new float*[...]` — broke for T != float
    }
    if (nullptr == subImgZCenter)
    {
        subImgZCenter = new T[subVolN]; // fix: was `new float[...]` — same T/float mismatch
    }
    thrust::host_vector<T> h_zPos(hzPos, hzPos + PN);
    omp_set_num_threads(subVolN);
#pragma omp parallel for
    for (int i = 0; i < subVolN; ++i) //The last one has problem!!!!!!!!!!
    {
        // Z slice range [ObjIdx_Start[i], ObjIdx_End[i]) covered by this
        // slab's projection range.
        getVolZIdxPair<T>(h_zPos, PrjIdx_Start[i], PrjIdx_End[i],
            detCntIdxV, detStpZ, DNV, objCntIdxZ, dz, ZN, ObjIdx_Start[i], ObjIdx_End[i]);
        std::cout << i << " " << ObjIdx_Start[i] << " " << ObjIdx_End[i] << "\n";
        SZN[i] = ObjIdx_End[i] - ObjIdx_Start[i];
        subVol[i] = new T[XN * YN * SZN[i]];
        // Divide the volume
        getSubVolume<T>(hvol, XN * YN, ZN, ObjIdx_Start[i], ObjIdx_End[i], subVol[i]);
        // Calculate the corresponding center position of the slab.
        subImgZCenter[i] = ((ObjIdx_End[i] + ObjIdx_Start[i] - (ZN - 1.0)) * dz + imgZCenter * 2.0) / 2.0;
    }
}
template<typename T>
void DelSubVols(
    int* ObjIdx_Start,
    int* ObjIdx_End,
    int* SZN,
    T** subVol,
    T* subImgZCenter, const int subVolN)
{
    // Release everything allocated for the sub-volume split: each per-slab
    // buffer, the pointer table itself, and the bookkeeping arrays.
    for (int i = 0; i < subVolN; ++i)
    {
        delete[] subVol[i];
    }
    delete[] subVol;
    delete[] subImgZCenter;
    delete[] ObjIdx_Start;
    delete[] ObjIdx_End;
    delete[] SZN;
}
// Multi-GPU pseudo distance-driven forward projection.
// The PN views are split at startPN[0..gpuNum-1]; GPU i projects views
// [startPN[i], startPN[i+1]) against the Z-slab of the volume those views can
// see, and the per-GPU projection blocks are copied back into hprj in view
// order.
// Fixes: the device->host hipMemcpyAsync into pageable hprj was never
// synchronized before the device buffers were released / the function
// returned, and the per-GPU streams were never destroyed.
void DD3_gpu_proj_pseudodistancedriven_multiGPU(
    float x0, float y0, float z0,
    int DNU, int DNV,
    float* xds, float* yds, float* zds,
    float imgXCenter, float imgYCenter, float imgZCenter,
    float* h_angs, float* h_zPos, int PN,
    int XN, int YN, int ZN,
    float* hvol, float* hprj,
    float dx, float dz,
    byte* mask, const int* startPN, int gpuNum)
{
    thrust::host_vector<float> hangs(h_angs, h_angs + PN);
    thrust::host_vector<float> hzPos(h_zPos, h_zPos + PN);
    // Zero out masked (x, y) columns of the volume in place.
    for (int i = 0; i != XN * YN; ++i)
    {
        byte v = mask[i];
        for (int z = 0; z != ZN; ++z)
        {
            hvol[i * ZN + z] = hvol[i * ZN + z] * v;
        }
    }
    // Fractional voxel index of the object center along each axis.
    const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
    const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
    const float objCntIdxZ = (ZN - 1.0) * 0.5 - imgZCenter / dz;
    // Per-GPU view ranges [PrjIdx_Start[i], PrjIdx_End[i]).
    std::vector<int> ObjIdx_Start(gpuNum, -1);
    std::vector<int> ObjIdx_End(gpuNum, -1);
    std::vector<int> PrjIdx_Start(startPN, startPN + gpuNum);
    std::vector<int> PrjIdx_End(gpuNum, 0);
    std::copy(PrjIdx_Start.begin() + 1, PrjIdx_Start.end(), PrjIdx_End.begin());
    PrjIdx_End[gpuNum - 1] = PN;
    std::vector<int> SPN = PrjIdx_End - PrjIdx_Start; // views per GPU
    std::vector<int> prefixSPN = SPN; // offset of each GPU's first view
    thrust::exclusive_scan(prefixSPN.begin(), prefixSPN.end(), prefixSPN.begin());
    std::vector<int> SZN(gpuNum, 0); // slices per sub volume
    const float detStpZ = (zds[DNV - 1] - zds[0]) / (DNV - 1); // detector cell height
    const float detCntIdxV = -zds[0] / detStpZ; // detector center row index
    std::vector<std::vector<float> > subVol(gpuNum); // host staging for each slab
    std::vector<float> subImgZCenter(gpuNum, 0); // per-slab center (voxel index units)
    // Per-GPU stream and device resources.
    std::vector<hipStream_t> stream(gpuNum);
    std::vector<int> siz(gpuNum, 0);
    std::vector<hipExtent> volumeSize(gpuNum);
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    std::vector<hipArray*> d_volumeArray(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > d_zds(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > angs(gpuNum);
    thrust::host_vector<thrust::device_vector<float> > zPos(gpuNum);
    thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
    dim3 blk(64, 16, 1);
    std::vector<dim3> gid(gpuNum);
    std::vector<hipTextureObject_t> texObj(gpuNum);
    // One host thread per GPU prepares that GPU's inputs.
    omp_set_num_threads(gpuNum);
#pragma omp parallel for
    for (int i = 0; i < gpuNum; ++i)
    {
        // Z-slice range this GPU's views can illuminate.
        getVolZIdxPair<float>(hzPos, PrjIdx_Start[i], PrjIdx_End[i],
            detCntIdxV, detStpZ, DNV, objCntIdxZ, dz, ZN, ObjIdx_Start[i],
            ObjIdx_End[i]);
        SZN[i] = ObjIdx_End[i] - ObjIdx_Start[i];
        subVol[i].resize(XN * YN * SZN[i]);
        getSubVolume<float>(hvol, XN * YN, ZN, ObjIdx_Start[i], ObjIdx_End[i], &(subVol[i][0]));
        // Center index of the slab so that (ii - subImgZCenter[i]) * dz gives
        // the physical z of slab slice ii.
        subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjIdx_Start[i] - 0.5f;
        CUDA_SAFE_CALL(hipSetDevice(i));
        CUDA_SAFE_CALL(hipStreamCreate(&stream[i]));
        siz[i] = XN * YN * SZN[i];
        d_vol[i].resize(siz[i]);
        d_vol[i] = subVol[i];
        subVol[i].clear();
        // Upload the slab into a 3D array bound to a linear-filtered texture.
        volumeSize[i].width = SZN[i];
        volumeSize[i].height = XN;
        volumeSize[i].depth = YN;
        CUDA_SAFE_CALL(hipMalloc3DArray(&d_volumeArray[i], &channelDesc, volumeSize[i]));
        hipMemcpy3DParms copyParams = { 0 };
        copyParams.srcPtr = make_hipPitchedPtr((void*)
            thrust::raw_pointer_cast(&d_vol[i][0]),
            volumeSize[i].width * sizeof(float),
            volumeSize[i].width, volumeSize[i].height);
        copyParams.dstArray = d_volumeArray[i];
        copyParams.extent = volumeSize[i];
        copyParams.kind = hipMemcpyDeviceToDevice;
        CUDA_SAFE_CALL(hipMemcpy3DAsync(&copyParams, stream[i]));
        // clear() keeps the capacity; the storage stays valid until d_vol is
        // destroyed at the end of this function, after the stream is synced.
        d_vol[i].clear();
        hipResourceDesc resDesc;
        memset(&resDesc, 0, sizeof(resDesc));
        resDesc.resType = hipResourceTypeArray;
        resDesc.res.array.array = d_volumeArray[i];
        hipTextureDesc texDesc;
        memset(&texDesc, 0, sizeof(texDesc));
        texDesc.addressMode[0] = hipAddressModeBorder;
        texDesc.addressMode[1] = hipAddressModeBorder;
        texDesc.addressMode[2] = hipAddressModeBorder;
        texDesc.filterMode = hipFilterModeLinear;
        texDesc.readMode = hipReadModeElementType;
        texDesc.normalizedCoords = false;
        texObj[i] = 0;
        CUDA_SAFE_CALL(hipCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr));
        prj[i].resize(DNU * DNV * SPN[i]);
        d_xds[i].resize(DNU);
        d_yds[i].resize(DNU);
        d_zds[i].resize(DNV);
        thrust::copy(xds, xds + DNU, d_xds[i].begin());
        thrust::copy(yds, yds + DNU, d_yds[i].begin());
        thrust::copy(zds, zds + DNV, d_zds[i].begin());
        angs[i].resize(SPN[i]);
        zPos[i].resize(SPN[i]);
        thrust::copy(hangs.begin() + PrjIdx_Start[i],
            hangs.begin() + PrjIdx_Start[i] + SPN[i],
            angs[i].begin());
        thrust::copy(hzPos.begin() + PrjIdx_Start[i],
            hzPos.begin() + PrjIdx_Start[i] + SPN[i],
            zPos[i].begin());
        // Per-view (cos, sin, z) constants.
        // NOTE(review): sized PN although only the first SPN[i] entries are
        // written and consumed by the kernel — could be shrunk to SPN[i].
        cossinZT[i].resize(PN);
        thrust::transform(
            thrust::make_zip_iterator(thrust::make_tuple(angs[i].begin(), zPos[i].begin())),
            thrust::make_zip_iterator(thrust::make_tuple(angs[i].end(), zPos[i].end())),
            cossinZT[i].begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
        angs[i].clear();
        zPos[i].clear();
        gid[i].x = (DNV + blk.x - 1) / blk.x;
        gid[i].y = (DNU + blk.y - 1) / blk.y;
        gid[i].z = (SPN[i] + blk.z - 1) / blk.z;
    }
#pragma omp parallel for
    for (int i = 0; i < gpuNum; ++i)
    {
        hipSetDevice(i);
        DD3_gpu_proj_pseudodistancedriven_ker<<<gid[i], blk, 0, stream[i]>>>(
            texObj[i], thrust::raw_pointer_cast(&prj[i][0]),
            make_float3(x0, y0, z0),
            thrust::raw_pointer_cast(&d_xds[i][0]),
            thrust::raw_pointer_cast(&d_yds[i][0]),
            thrust::raw_pointer_cast(&d_zds[i][0]),
            thrust::raw_pointer_cast(&cossinZT[i][0]),
            make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]),
            dx, dz, XN, YN, DNU, DNV, SPN[i]);
    }
    // NOTE(review): this barrier binds to no parallel region and is a no-op;
    // the parallel for above already joins its threads.
#pragma omp barrier
#pragma omp parallel for
    for (int i = 0; i < gpuNum; ++i)
    {
        hipSetDevice(i);
        CUDA_SAFE_CALL(hipMemcpyAsync(hprj + DNU * DNV * prefixSPN[i],
            thrust::raw_pointer_cast(&prj[i][0]), sizeof(float) * DNU * DNV * SPN[i],
            hipMemcpyDeviceToHost, stream[i]));
        // hprj is ordinary pageable memory and the copy above is asynchronous:
        // wait for the stream before releasing the buffers it reads from and
        // before returning results to the caller.
        CUDA_SAFE_CALL(hipStreamSynchronize(stream[i]));
        d_xds[i].clear();
        d_yds[i].clear();
        d_zds[i].clear();
        cossinZT[i].clear();
        prj[i].clear();
        CUDA_SAFE_CALL(hipDestroyTextureObject(texObj[i]));
        CUDA_SAFE_CALL(hipFreeArray(d_volumeArray[i]));
        CUDA_SAFE_CALL(hipStreamDestroy(stream[i])); // streams were previously leaked
    }
    // Host containers free their storage on scope exit; the explicit clears
    // below mirror the original cleanup.
    hangs.clear();
    hzPos.clear();
    ObjIdx_Start.clear();
    ObjIdx_End.clear();
    PrjIdx_Start.clear();
    PrjIdx_End.clear();
    SPN.clear();
    prefixSPN.clear();
    SZN.clear();
    subVol.clear();
    subImgZCenter.clear();
    stream.clear();
    siz.clear();
    volumeSize.clear();
    d_volumeArray.clear();
    d_vol.clear();
    prj.clear();
    d_xds.clear();
    d_yds.clear();
    d_zds.clear();
    angs.clear();
    zPos.clear();
    cossinZT.clear();
    gid.clear();
}
void DD3_gpu_proj_branchless_sat2d_multiGPU(
float x0, float y0, float z0,
int DNU, int DNV,
float* xds, float* yds, float* zds,
float imgXCenter, float imgYCenter, float imgZCenter,
float* h_angs, float* h_zPos, int PN,
int XN, int YN, int ZN,
float* hvol, float* hprj,
float dx, float dz,
byte* mask,const int* startPN, int gpuNum)
{
thrust::host_vector<float> hangs(h_angs, h_angs+PN);
thrust::host_vector<float> hzPos(h_zPos, h_zPos+PN);
for (int i = 0; i != XN * YN; ++i)
{
byte v = mask[i];
for (int z = 0; z != ZN; ++z)
{
hvol[i * ZN + z] = hvol[i * ZN + z] * v;
}
}
// Calculate the boundary positions
std::vector<float> bxds(DNU + 1, 0.0f);
std::vector<float> byds(DNU + 1, 0.0f);
std::vector<float> bzds(DNV + 1, 0.0f);
DD3Boundaries<float>(DNU + 1, xds, bxds);
DD3Boundaries<float>(DNU + 1, yds, byds);
DD3Boundaries<float>(DNV + 1, zds, bzds);
const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
const float objCntIdxZ = (ZN - 1.0) * 0.5 - imgZCenter / dz;
// Divide the volume into sub volumes with overlaps according to the startPN
std::vector<int> ObjIdx_Start(gpuNum, -1);
std::vector<int> ObjIdx_End(gpuNum, -1);
std::vector<int> PrjIdx_Start(startPN, startPN+gpuNum);
std::vector<int> PrjIdx_End(gpuNum, 0);
std::copy(PrjIdx_Start.begin()+1, PrjIdx_Start.end(), PrjIdx_End.begin());
PrjIdx_End[gpuNum - 1] = PN;
std::vector<int> SPN = PrjIdx_End - PrjIdx_Start;
std::vector<int> prefixSPN = SPN;
thrust::exclusive_scan(prefixSPN.begin(), prefixSPN.end(), prefixSPN.begin());
//std::cout<<"prefixSPN are "<<prefixSPN[0]<<" "<<prefixSPN[1]<<" "<<prefixSPN[2]<<"\n";
std::vector<int> SZN(gpuNum, 0); // The slices number of each sub volume
const float detStpZ = (zds[DNV-1] - zds[0]) / (DNV - 1); // detector cell height
const float detCntIdxV = -zds[0] / detStpZ; // Detector center along Z direction
std::vector<std::vector<float> > subVol(gpuNum); // Used to store three sub volumes
std::vector<float> subImgZCenter(gpuNum, 0); // the center of three sub volumes
// Generate multiple streams;
std::vector<hipStream_t> stream(gpuNum * 2);
std::vector<int> siz(gpuNum, 0);
std::vector<int> nsiz_ZXY(gpuNum, 0);
std::vector<int> nsiz_ZYX(gpuNum, 0);
std::vector<int> nZN(gpuNum,0);
const int nXN = XN + 1;
const int nYN = YN + 1;
thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_ZXY(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_ZYX(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prj(gpuNum); // Change here
thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_zds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_bxds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_byds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_bzds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > angs(gpuNum);
thrust::host_vector<thrust::device_vector<float> > zPos(gpuNum);
thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
// Copy to three volumes
dim3 copyblk(64, 16, 1);
std::vector<dim3> copygid(gpuNum);
dim3 satblk1(32,1,1);
dim3 satblk2(64,16,1);
dim3 satgid1_1((nXN * YN + satblk1.x - 1) / satblk1.x, 1, 1);
dim3 satgid1_2((nYN * XN + satblk1.x - 1) / satblk1.x, 1, 1);
std::vector<dim3> satgid2_1(gpuNum);
std::vector<dim3> satgid2_2(gpuNum);
dim3 blk(BLKX, BLKY, BLKZ);
std::vector<dim3> gid(gpuNum);
std::vector<hipExtent> volumeSize1(gpuNum);
std::vector<hipExtent> volumeSize2(gpuNum);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
std::vector<hipArray*> d_volumeArray1(gpuNum);
std::vector<hipArray*> d_volumeArray2(gpuNum);
std::vector<hipTextureObject_t> texObj1(gpuNum);
std::vector<hipTextureObject_t> texObj2(gpuNum);
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
getVolZIdxPair<float>(hzPos, PrjIdx_Start[i], PrjIdx_End[i],
detCntIdxV, detStpZ, DNV, objCntIdxZ, dz, ZN, ObjIdx_Start[i],
ObjIdx_End[i]);
SZN[i] = ObjIdx_End[i] - ObjIdx_Start[i];
subVol[i].resize(XN * YN * SZN[i]);
// Divide the volume into multiple sets
getSubVolume<float>(hvol, XN * YN, ZN, ObjIdx_Start[i], ObjIdx_End[i], &(subVol[i][0]));
// NOTE: How it comes
// We need to calculate the (ii - subImgZCenter[i]) * dz to define the
// real physical position of the voxel.
// Assume that the physical center of the whole volume is imgZCenter
// The minimum lower position of the volume is imgZCenter - dz * N / 2;
// Then the corresponding physical lower boundary position of ObjIdx_Start[i]
// is --> imgZCenter - dz * N / 2 + ObjIdx_Start[i] * dz
// while the corresponding physical center position of layer ObjIdx_Start[i]
// is --> imgZCenter - dz * N / 2 + ObjIdx_Start[i] * dz + 0.5 * dz
// We need when ii==0 --> (ii - subImgZCenter[i]) * dz = imgZCenter - dz * N / 2 + ObjIdx_Start[i] * dz + 0.5 * dz
// It means subImgZCenter[i] = -imgZCenter / dz + N / 2 - ObjIdx_Start[i] - 0.5;
subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjIdx_Start[i] - 0.5f;
CUDA_SAFE_CALL(hipSetDevice(i));
// For each GPU generate two streams
CUDA_SAFE_CALL(hipStreamCreate(&stream[i * 2]));
CUDA_SAFE_CALL(hipStreamCreate(&stream[i * 2 + 1]));
siz[i] = XN * YN * SZN[i];
nZN[i] = SZN[i] + 1;
nsiz_ZXY[i] = nZN[i] * nXN * YN;
nsiz_ZYX[i] = nZN[i] * nYN * XN;
d_ZXY[i].resize(nsiz_ZXY[i]);
d_ZYX[i].resize(nsiz_ZYX[i]);
d_vol[i].resize(siz[i]);
d_vol[i] = subVol[i];
subVol[i].clear();
copygid[i].x = (SZN[i] + copyblk.x - 1) / copyblk.x;
copygid[i].y = (XN + copyblk.y - 1) / copyblk.y;
copygid[i].z = (YN + copyblk.z - 1) / copyblk.z;
naive_copyToTwoVolumes << <copygid[i], copyblk, 0, stream[2 * i] >> >(
thrust::raw_pointer_cast(&d_vol[i][0]),
thrust::raw_pointer_cast(&d_ZXY[i][0]),
thrust::raw_pointer_cast(&d_ZYX[i][0]),
XN,YN,SZN[i]);
CUDA_SAFE_CALL(hipStreamSynchronize(stream[2 * i]));
CUDA_SAFE_CALL(hipStreamSynchronize(stream[2 * i + 1]));
d_vol[i].clear();
// Generate the SAT for two volumes
satgid2_1[i].x = (nZN[i] + satblk2.x - 1) / satblk2.x;
satgid2_1[i].y = (YN + satblk2.y - 1) / satblk2.y;
satgid2_1[i].z = 1;
satgid2_2[i].x = (nZN[i] + satblk2.x - 1) / satblk2.x;
satgid2_2[i].y = (XN + satblk2.y - 1) / satblk2.y;
satgid2_2[i].z = 1;
verticalIntegral << <satgid1_1, satblk1, 0, stream[2 * i] >> >(
thrust::raw_pointer_cast(&d_ZXY[i][0]), nZN[i], nXN * YN);
horizontalIntegral << <satgid2_1[i], satblk2, 0, stream[2 * i] >> >(
thrust::raw_pointer_cast(&d_ZXY[i][0]), nXN, nZN[i], YN);
verticalIntegral << <satgid1_2, satblk1, 0, stream[2 * i + 1] >> >(
thrust::raw_pointer_cast(&d_ZYX[i][0]), nZN[i], nYN * XN);
horizontalIntegral << <satgid2_2[i], satblk2, 0, stream[2 * i + 1] >> >(
thrust::raw_pointer_cast(&d_ZYX[i][0]), nYN, nZN[i], XN);
//Bind to the texture;
volumeSize1[i].width = nZN[i];
volumeSize1[i].height = nXN;
volumeSize1[i].depth = YN;
volumeSize2[i].width = nZN[i];
volumeSize2[i].height = nYN;
volumeSize2[i].depth = XN;
CUDA_SAFE_CALL(hipMalloc3DArray(&d_volumeArray1[i], &channelDesc, volumeSize1[i]));
CUDA_SAFE_CALL(hipMalloc3DArray(&d_volumeArray2[i], &channelDesc, volumeSize2[i]));
hipMemcpy3DParms copyParams1 = { 0 };
copyParams1.srcPtr = make_hipPitchedPtr((void*)
thrust::raw_pointer_cast(&d_ZXY[i][0]),
volumeSize1[i].width * sizeof(float),
volumeSize1[i].width, volumeSize1[i].height);
copyParams1.dstArray = d_volumeArray1[i];
copyParams1.extent = volumeSize1[i];
copyParams1.kind = hipMemcpyDeviceToDevice;
hipMemcpy3DParms copyParams2 = { 0 };
copyParams2.srcPtr = make_hipPitchedPtr((void*)
thrust::raw_pointer_cast(&d_ZYX[i][0]),
volumeSize2[i].width * sizeof(float),
volumeSize2[i].width, volumeSize2[i].height);
copyParams2.dstArray = d_volumeArray2[i];
copyParams2.extent = volumeSize2[i];
copyParams2.kind = hipMemcpyDeviceToDevice;
CUDA_SAFE_CALL(hipMemcpy3DAsync(©Params1,stream[2 * i]));
CUDA_SAFE_CALL(hipMemcpy3DAsync(©Params2,stream[2 * i + 1]));
d_ZXY[i].clear();
d_ZYX[i].clear();
hipResourceDesc resDesc1;
hipResourceDesc resDesc2;
memset(&resDesc1, 0, sizeof(resDesc1));
memset(&resDesc2, 0, sizeof(resDesc2));
resDesc1.resType = hipResourceTypeArray;
resDesc2.resType = hipResourceTypeArray;
resDesc1.res.array.array = d_volumeArray1[i];
resDesc2.res.array.array = d_volumeArray2[i];
hipTextureDesc texDesc1;
hipTextureDesc texDesc2;
memset(&texDesc1, 0, sizeof(texDesc1));
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc1.addressMode[0] = hipAddressModeClamp;
texDesc1.addressMode[1] = hipAddressModeClamp;
texDesc1.addressMode[2] = hipAddressModeClamp;
texDesc2.addressMode[0] = hipAddressModeClamp;
texDesc2.addressMode[1] = hipAddressModeClamp;
texDesc2.addressMode[2] = hipAddressModeClamp;
texDesc1.filterMode = hipFilterModeLinear;
texDesc2.filterMode = hipFilterModeLinear;
texDesc1.readMode = hipReadModeElementType;
texDesc2.readMode = hipReadModeElementType;
texDesc1.normalizedCoords = false;
texDesc2.normalizedCoords = false;
texObj1[i] = 0;
texObj2[i] = 0;
CUDA_SAFE_CALL(hipCreateTextureObject(&texObj1[i], &resDesc1, &texDesc1, nullptr));
CUDA_SAFE_CALL(hipCreateTextureObject(&texObj2[i], &resDesc2, &texDesc2, nullptr));
prj[i].resize(DNU * DNV * SPN[i]); // Change here
d_xds[i].resize(DNU);
d_yds[i].resize(DNU);
d_zds[i].resize(DNV);
thrust::copy(xds,xds+DNU,d_xds[i].begin());
thrust::copy(yds,yds+DNU,d_yds[i].begin());
thrust::copy(zds,zds+DNV,d_zds[i].begin());
d_bxds[i].resize(bxds.size());
d_bxds[i] = bxds;
d_byds[i].resize(byds.size());
d_byds[i] = byds;
d_bzds[i].resize(bzds.size());
d_bzds[i] = bzds;
angs[i].resize(SPN[i]);
zPos[i].resize(SPN[i]);
thrust::copy(hangs.begin() + PrjIdx_Start[i],
hangs.begin() + PrjIdx_Start[i] + SPN[i],
angs[i].begin());
thrust::copy(hzPos.begin() + PrjIdx_Start[i],
hzPos.begin() + PrjIdx_Start[i] + SPN[i],
zPos[i].begin());
cossinZT[i].resize(PN);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(angs[i].begin(), zPos[i].begin())),
thrust::make_zip_iterator(thrust::make_tuple(angs[i].end(), zPos[i].end())),
cossinZT[i].begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
angs[i].clear();
zPos[i].clear();
gid[i].x = (DNV + blk.x - 1) / blk.x;
gid[i].y = (DNU + blk.y - 1) / blk.y;
gid[i].z = (SPN[i] + blk.z - 1) / blk.z;
}
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
DD3_gpu_proj_branchless_sat2d_ker << <gid[i], blk, 0, stream[i * 2]>> >(
texObj1[i], texObj2[i],
thrust::raw_pointer_cast(&prj[i][0]),
make_float3(x0, y0, z0),
thrust::raw_pointer_cast(&cossinZT[i][0]),
thrust::raw_pointer_cast(&d_xds[i][0]),
thrust::raw_pointer_cast(&d_yds[i][0]),
thrust::raw_pointer_cast(&d_zds[i][0]),
thrust::raw_pointer_cast(&d_bxds[i][0]),
thrust::raw_pointer_cast(&d_byds[i][0]),
thrust::raw_pointer_cast(&d_bzds[i][0]),
make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]),
dx, dz, XN, YN, ZN, DNU, DNV, SPN[i]);
}
#pragma omp barrier
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
CUDA_SAFE_CALL(hipMemcpyAsync(hprj + DNU * DNV * prefixSPN[i],
thrust::raw_pointer_cast(&prj[i][0]), sizeof(float) * DNU * DNV * SPN[i],
hipMemcpyDeviceToHost,stream[2*i]));
d_xds[i].clear();
d_yds[i].clear();
d_zds[i].clear();
d_bxds[i].clear();
d_byds[i].clear();
d_bzds[i].clear();
cossinZT[i].clear();
prj[i].clear();
CUDA_SAFE_CALL(hipDestroyTextureObject(texObj1[i]));
CUDA_SAFE_CALL(hipDestroyTextureObject(texObj2[i]));
CUDA_SAFE_CALL(hipFreeArray(d_volumeArray1[i]));
CUDA_SAFE_CALL(hipFreeArray(d_volumeArray2[i]));
}
// Clear the vectors
hangs.clear();
hzPos.clear();
bxds.clear();
byds.clear();
bzds.clear();
ObjIdx_Start.clear();
ObjIdx_End.clear();
PrjIdx_Start.clear();
PrjIdx_End.clear();
SPN.clear();
prefixSPN.clear();
SZN.clear();
subVol.clear();
subImgZCenter.clear();
stream.clear();
siz.clear();
nsiz_ZXY.clear();
nsiz_ZYX.clear();
nZN.clear();
d_vol.clear();
d_ZXY.clear();
d_ZYX.clear();
prj.clear();
d_xds.clear();
d_yds.clear();
d_zds.clear();
d_bxds.clear();
d_byds.clear();
d_bzds.clear();
angs.clear();
zPos.clear();
cossinZT.clear();
copygid.clear();
satgid2_1.clear();
satgid2_2.clear();
gid.clear();
volumeSize1.clear();
volumeSize2.clear();
d_volumeArray1.clear();
d_volumeArray2.clear();
}
extern "C"
void DD3Proj_multiGPU(
	float x0, float y0, float z0,
	int DNU, int DNV,
	float* xds, float* yds, float* zds,
	float imgXCenter, float imgYCenter, float imgZCenter,
	float* hangs, float* hzPos, int PN,
	int XN, int YN, int ZN,
	float* hvol, float* hprj,
	float dx, float dz,
	byte* mask, int prjMode, const int* startPN, int gpuNum)
{
	// Dispatch to the requested multi-GPU forward-projection model.
	// prjMode == 0 selects the branchless DD (SAT-based) projector; any
	// other value falls back to the pseudo distance-driven projector.
	if (prjMode == 0)
	{
		DD3_gpu_proj_branchless_sat2d_multiGPU(x0, y0, z0, DNU, DNV,
			xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj, dx, dz,
			mask, startPN, gpuNum);
	}
	else
	{
		DD3_gpu_proj_pseudodistancedriven_multiGPU(x0, y0, z0, DNU, DNV,
			xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj, dx, dz,
			mask, startPN, gpuNum);
	}
}
// Selectors for the templated backprojection kernel DD3_gpu_back_ker below.
// Only _BRANCHLESS and _PSEUDODD have specializations in this file.
enum BackProjectionMethod{ _BRANCHLESS, _PSEUDODD, _ZLINEBRANCHLESS, _VOLUMERENDERING };
#ifndef CALDETPARAS
#define CALDETPARAS
// Derive detector geometry parameters from the cell-center coordinates.
// Returns make_float4(detCtrIdxU, detCtrIdxV, dbeta, ddv):
//   detCtrIdxU / detCtrIdxV : fractional index of the central ray on the detector,
//   dbeta                   : fan-angle step between detector columns,
//   ddv                     : detector cell height along Z.
float4 calDetParas(float* xds, float* yds, float* zds, float x0, float y0, float z0, int DNU, int DNV)
{
	// Cell boundary coordinates (one more entry than cell centers).
	float* xBound = new float[DNU + 1];
	float* yBound = new float[DNU + 1];
	float* zBound = new float[DNV + 1];
	DD3Boundaries(DNU + 1, xds, xBound);
	DD3Boundaries(DNU + 1, yds, yBound);
	DD3Boundaries(DNV + 1, zds, zBound);
	// Vertical geometry: uniform cell height and center index along Z.
	const float ddv = (zBound[DNV] - zBound[0]) / DNV;
	const float detCtrIdxV = (-(zBound[0] - z0) / ddv) - 0.5;
	// In-plane geometry: unit directions from the source towards the iso
	// center and towards both detector edges give the fan-angle extent.
	const float2 dirC = normalize(make_float2(-x0, -y0));
	const float2 dirLeft = normalize(make_float2(xBound[0] - x0, yBound[0] - y0));
	const float2 dirRight = normalize(make_float2(xBound[DNU] - x0, yBound[DNU] - y0));
	const float dbeta = asin(dirLeft.x * dirRight.y - dirLeft.y * dirRight.x) / DNU;
	const float betaMin = asin(dirC.x * dirLeft.y - dirC.y * dirLeft.x);
	const float detCtrIdxU = -betaMin / dbeta - 0.5;
	delete [] xBound;
	delete [] yBound;
	delete [] zBound;
	return make_float4(detCtrIdxU, detCtrIdxV, dbeta, ddv);
}
#endif
// Pad each DNV x DNU projection view with a one-pixel zero border on every
// side, writing into a (DNV+2) x (DNU+2) x PN output buffer that the caller
// must have zero-initialized.
// Launch mapping: x -> detector row (DNV), y -> detector column (DNU), z -> view (PN).
__global__ void addTwoSidedZeroBoarder(float* prjIn, float* prjOut,
	const int DNU, const int DNV, const int PN)
{
	const int v = threadIdx.x + blockIdx.x * blockDim.x;
	const int u = threadIdx.y + blockIdx.y * blockDim.y;
	const int p = threadIdx.z + blockIdx.z * blockDim.z;
	if (v >= DNV || u >= DNU || p >= PN)
		return;
	prjOut[(p * (DNU + 2) + (u + 1)) * (DNV + 2) + (v + 1)] =
		prjIn[(p * DNU + u) * DNV + v];
}
// Pad each projection view with one zero row/column at the low side of both
// detector axes: (DNV x DNU) becomes ((DNV+1) x (DNU+1)) per view. The output
// must be pre-zeroed; this is the first step of summed-area-table generation.
// Launch mapping: x -> detector row (DNV), y -> detector column (DNU), z -> view (PN).
__global__ void addOneSidedZeroBoarder(const float* prj_in, float* prj_out, int DNU, int DNV, int PN)
{
	const int v = threadIdx.x + blockIdx.x * blockDim.x;
	const int u = threadIdx.y + blockIdx.y * blockDim.y;
	const int p = threadIdx.z + blockIdx.z * blockDim.z;
	if (v >= DNV || u >= DNU || p >= PN)
		return;
	prj_out[(p * (DNU + 1) + (u + 1)) * (DNV + 1) + (v + 1)] =
		prj_in[(p * DNU + u) * DNV + v];
}
// In-place inclusive prefix sum along the fastest-varying axis (length ZN).
// The buffer is treated as N independent columns of ZN contiguous values;
// one thread integrates one column sequentially.
__global__ void verticalIntegral2(float* prj, int ZN, int N)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	if (col >= N)
		return;
	float* column = prj + col * ZN;
	for (int ii = 1; ii < ZN; ++ii)
	{
		column[ii] += column[ii - 1];
	}
}
// In-place inclusive prefix sum along the detector-column axis (stride DNV)
// for each view. One thread integrates one detector row of one view.
// (The historical "heorizontal" spelling is kept: callers use this name.)
__global__ void heorizontalIntegral2(float* prj, int DNU, int DNV, int PN)
{
	const int v = threadIdx.x + blockIdx.x * blockDim.x;
	const int p = threadIdx.y + blockIdx.y * blockDim.y;
	if (v >= DNV || p >= PN)
		return;
	float* row = prj + p * DNU * DNV + v;
	for (int ii = 1; ii < DNU; ++ii)
	{
		row[ii * DNV] += row[(ii - 1) * DNV];
	}
}
// Build a per-view 2D summed-area table (SAT) of the projection data.
// hprj is a host pointer with DNU x DNV x PN samples; the result is a device
// vector of (DNU+1)*(DNV+1)*PN values: each view gets a zero row/column
// prepended and is then integrated along both detector axes.
// NOTE: copyBlk/copyGid are deliberately mutated between the three launches;
// the launch order (pad -> vertical integral -> horizontal integral) matters.
thrust::device_vector<float> genSAT_of_Projection(
float* hprj,
int DNU, int DNV, int PN)
{
const int siz = DNU * DNV * PN;
const int nsiz = (DNU + 1) * (DNV + 1) * PN;
// Destination starts zeroed so the padded border stays zero.
thrust::device_vector<float> prjSAT(nsiz, 0);
thrust::device_vector<float> prj(hprj, hprj + siz);
dim3 copyBlk(64, 16, 1);
dim3 copyGid(
(DNV + copyBlk.x - 1) / copyBlk.x,
(DNU + copyBlk.y - 1) / copyBlk.y,
(PN + copyBlk.z - 1) / copyBlk.z);
// Step 1: copy into the enlarged buffer, leaving a zero border at the low side.
addOneSidedZeroBoarder << <copyGid, copyBlk >> >(
thrust::raw_pointer_cast(&prj[0]),
thrust::raw_pointer_cast(&prjSAT[0]),
DNU, DNV, PN);
const int nDNU = DNU + 1;
const int nDNV = DNV + 1;
// Step 2: prefix sum along the detector-row axis (one thread per column).
copyBlk.x = 512;
copyBlk.y = 1;
copyBlk.z = 1;
copyGid.x = (nDNU * PN + copyBlk.x - 1) / copyBlk.x;
copyGid.y = 1;
copyGid.z = 1;
verticalIntegral2 << <copyGid, copyBlk >> >(
thrust::raw_pointer_cast(&prjSAT[0]),
nDNV, nDNU * PN);
// Step 3: prefix sum along the detector-column axis, per view.
copyBlk.x = 64;
copyBlk.y = 16;
copyBlk.z = 1;
copyGid.x = (nDNV + copyBlk.x - 1) / copyBlk.x;
copyGid.y = (PN + copyBlk.y - 1) / copyBlk.y;
copyGid.z = 1;
heorizontalIntegral2 << <copyGid, copyBlk >> >(
thrust::raw_pointer_cast(&prjSAT[0]),
nDNU, nDNV, PN);
return prjSAT;
}
// Allocate a 3D array of the given extent, fill it from sourceData, and
// create a texture object over it with the requested sampling modes.
//
// BUG FIX: d_prjArray is now passed by reference. The original signature took
// the pointer by value, so the array allocated here by hipMalloc3DArray was
// never handed back to the caller -- the caller's pointer stayed stale, the
// array leaked, and a later destroyTextureObject(texObj, d_prjArray) freed
// garbage. Call sites passing an lvalue hipArray* are unaffected by the
// signature change.
void createTextureObject(
	hipTextureObject_t& texObj,
	hipArray*& d_prjArray,
	int Width, int Height, int Depth,
	float* sourceData,
	hipMemcpyKind memcpyKind,
	hipTextureAddressMode addressMode,
	hipTextureFilterMode textureFilterMode,
	hipTextureReadMode textureReadMode,
	bool isNormalized)
{
	// Describe the 3D extent (width is the fastest-varying dimension).
	hipExtent prjSize;
	prjSize.width = Width;
	prjSize.height = Height;
	prjSize.depth = Depth;
	hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
	hipMalloc3DArray(&d_prjArray, &channelDesc, prjSize);
	// Copy the (pitched-linear) source buffer into the array.
	hipMemcpy3DParms copyParams = { 0 };
	copyParams.srcPtr = make_hipPitchedPtr(
		(void*) sourceData, prjSize.width * sizeof(float),
		prjSize.width, prjSize.height);
	copyParams.dstArray = d_prjArray;
	copyParams.extent = prjSize;
	copyParams.kind = memcpyKind;
	hipMemcpy3D(&copyParams);
	// Bind a texture object to the array.
	hipResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = hipResourceTypeArray;
	resDesc.res.array.array = d_prjArray;
	hipTextureDesc texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.addressMode[0] = addressMode;
	texDesc.addressMode[1] = addressMode;
	texDesc.addressMode[2] = addressMode;
	texDesc.filterMode = textureFilterMode;
	texDesc.readMode = textureReadMode;
	texDesc.normalizedCoords = isNormalized;
	hipCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
}
// Release a texture object and free the 3D array that backed it.
// Caller must make the owning device current (hipSetDevice) beforehand.
void destroyTextureObject(hipTextureObject_t& texObj, hipArray* d_array)
{
hipDestroyTextureObject(texObj);
hipFreeArray(d_array);
}
// Primary template of the backprojection kernel; intentionally an empty body.
// Only the explicit specializations below (_BRANCHLESS, _PSEUDODD) do work;
// instantiating any other method is a silent no-op.
template < BackProjectionMethod METHOD >
__global__ void DD3_gpu_back_ker(
hipTextureObject_t prjTexObj,
float* vol,
const byte* __restrict__ msk,
const float3* __restrict__ cossinT,
float3 s,
float S2D,
float3 curvox,
float dx, float dz,
float dbeta, float ddv,
float2 detCntIdx,
int3 VN,
int PN, int squared)
{}
// Branchless distance-driven backprojection. Each thread accumulates one
// voxel by integrating rectangular detector footprints out of the per-view
// 2D summed-area table stored in prjTexObj (layout: row(Z) x column(U) x view,
// linearly filtered, so four texture fetches yield one rectangle integral).
// Launch mapping: x -> volume Z, y -> volume X, z -> volume Y.
// msk selects in-plane voxel columns; voxels with msk != 1 are skipped.
template<>
__global__ void DD3_gpu_back_ker<_BRANCHLESS>(
	hipTextureObject_t prjTexObj,
	float* vol,
	const byte* __restrict__ msk,
	const float3* __restrict__ cossinT,
	float3 s,
	float S2D,
	float3 curvox,
	float dx, float dz,
	float dbeta, float ddv,
	float2 detCntIdx,
	int3 VN,
	int PN, int squared)
{
	int3 id;
	id.z = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
	id.x = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
	id.y = threadIdx.z + __umul24(blockIdx.z, blockDim.z);
	if (id.x < VN.x && id.y < VN.y && id.z < VN.z)
	{
		if (msk[id.y * VN.x + id.x] != 1)
			return;
		// Physical voxel position (curvox arrives as the volume center index).
		curvox = (id - curvox) * make_float3(dx, dx, dz);
		float3 cursour;
		float idxL, idxR, idxU, idxD;
		float cosVal;
		float summ = 0;
		float3 cossin;
		float inv_sid = 1.0 / sqrtf(s.x * s.x + s.y * s.y);
		float3 dir;
		float l_square;
		float l;
		float alpha;
		float deltaAlpha;
		S2D = S2D / ddv;      // pre-scale: source-to-detector distance in detector-row units
		dbeta = 1.0 / dbeta;  // reciprocal of the fan-angle step
		dz = dz * 0.5;        // half voxel height
		for (int angIdx = 0; angIdx < PN; ++angIdx)
		{
			cossin = cossinT[angIdx];
			// Source position rotated to this view (z carries the helical shift).
			cursour = make_float3(
				s.x * cossin.x - s.y * cossin.y,
				s.x * cossin.y + s.y * cossin.x,
				s.z + cossin.z);
			dir = curvox - cursour;
			l_square = dir.x * dir.x + dir.y * dir.y;
			l = rsqrtf(l_square);
			// Vertical footprint of the voxel (top/bottom detector-row indices).
			idxU = (dir.z + dz) * S2D * l + detCntIdx.y + 1;
			idxD = (dir.z - dz) * S2D * l + detCntIdx.y + 1;
			// Fan angle of the ray through the voxel center.
			alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * inv_sid * l);
			// ddv is reused as the dominant in-plane direction component.
			if (fabsf(cursour.x) > fabsf(cursour.y))
			{
				ddv = dir.x;
			}
			else
			{
				ddv = dir.y;
			}
			deltaAlpha = ddv / l_square * dx * 0.5;
			cosVal = dx / ddv * sqrtf(l_square + dir.z * dir.z);  // ray-length weight
			idxL = (alpha - deltaAlpha) * dbeta + detCntIdx.x + 1;
			idxR = (alpha + deltaAlpha) * dbeta + detCntIdx.x + 1;
			// Four SAT fetches give the integral over the footprint rectangle.
			summ +=
				(-tex3D<float>(prjTexObj, idxD, idxR, angIdx + 0.5)
				- tex3D<float>(prjTexObj, idxU, idxL, angIdx + 0.5)
				+ tex3D<float>(prjTexObj, idxD, idxL, angIdx + 0.5)
				+ tex3D<float>(prjTexObj, idxU, idxR, angIdx + 0.5)) * cosVal;
		}
		// BUG FIX: a __syncthreads() previously stood here. It was removed:
		// this kernel uses no shared memory, and masked threads return early
		// above, so a block-wide barrier on this divergent path was both
		// useless and formally undefined behavior.
		vol[__umul24((__umul24(id.y, VN.x) + id.x), VN.z) + id.z] = summ;
	}
}
// Pseudo distance-driven backprojection. Each thread backprojects one voxel
// by sampling the linearly-filtered projection texture at the single detector
// position hit by the central ray through the voxel, weighted by ray length.
// Launch mapping: x -> volume Z, y -> volume X, z -> volume Y.
// msk selects in-plane voxel columns; voxels with msk != 1 are skipped.
template<>
__global__ void DD3_gpu_back_ker<_PSEUDODD>(
	hipTextureObject_t texObj,
	float* vol,
	const byte* __restrict__ msk,
	const float3* __restrict__ cossinZT,
	float3 s,
	float S2D,
	float3 objCntIdx,
	float dx, float dz, float dbeta, float ddv,
	float2 detCntIdx,
	int3 VN, int PN, int squared)
{
	int k = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int i = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
	int j = __mul24(blockIdx.z, blockDim.z) + threadIdx.z;
	if (i < VN.x && j < VN.y && k < VN.z)
	{
		if (msk[j * VN.x + i] != 1)
			return;
		// Physical voxel position relative to the volume center.
		float3 curVox = make_float3(
			(i - objCntIdx.x) * dx,
			(j - objCntIdx.y) * dx,
			(k - objCntIdx.z) * dz);
		float3 dir;
		float3 cursour;
		float invsid = rsqrtf(s.x * s.x + s.y * s.y);
		float invl;
		float idxZ;
		float idxXY;
		float alpha;
		float cosVal;
		float3 cossinT;
		float summ = 0;
		S2D = S2D / ddv;      // source-to-detector distance in detector-row units
		dbeta = 1.0 / dbeta;  // reciprocal of the fan-angle step
		for (int angIdx = 0; angIdx != PN; ++angIdx)
		{
			cossinT = cossinZT[angIdx];
			// Source position rotated to this view (z carries the helical shift).
			cursour = make_float3(
				s.x * cossinT.x - s.y * cossinT.y,
				s.x * cossinT.y + s.y * cossinT.x,
				s.z + cossinT.z);
			dir = curVox - cursour;
			ddv = dir.x * dir.x + dir.y * dir.y;  // ddv reused: squared in-plane distance
			invl = rsqrtf(ddv);
			idxZ = dir.z * S2D * invl + detCntIdx.y + 0.5;
			alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * invl * invsid);
			if (fabsf(cursour.x) >= fabsf(cursour.y))
			{
				cosVal = fabsf(1.0 / dir.x);
			}
			else
			{
				cosVal = fabsf(1.0 / dir.y);
			}
			cosVal *= (dx * sqrtf(ddv + dir.z * dir.z));  // ray-length weight
			idxXY = alpha * dbeta + detCntIdx.x + 0.5;
			summ += tex3D<float>(texObj, idxZ, idxXY, angIdx + 0.5f) * cosVal;
		}
		// BUG FIX: a __syncthreads() previously stood here. Removed because the
		// kernel uses no shared memory and masked threads return early above,
		// so the barrier was divergent (undefined behavior) and pointless.
		vol[(j * VN.x + i) * VN.z + k] = summ;
	}
}
// Multi-GPU branchless distance-driven backprojection (SAT flavour).
// The volume is split along Z across gpuNum devices at the slice indices in
// startVOL; each device builds a summed-area table of only the views that can
// geometrically touch its sub-volume, backprojects into its slab, and the
// partial volumes are stitched together with combineVolume at the end.
void DD3Back_branchless_sat2d_multiGPU(
	float x0, float y0, float z0,
	int DNU, int DNV,
	float* xds, float* yds, float* zds,
	float imgXCenter, float imgYCenter, float imgZCenter,
	float* h_angs, float* h_zPos, int PN,
	int XN, int YN, int ZN,
	float* hvol, float* hprj,
	float dx, float dz,
	byte* mask,const int* startVOL, int gpuNum)
{
	const int nDNU = DNU + 1;
	const int nDNV = DNV + 1;
	thrust::host_vector<float> hangs(h_angs, h_angs + PN);
	thrust::host_vector<float> hzPos(h_zPos, h_zPos + PN);
	// Per-GPU slab boundaries along Z (last slab ends at ZN).
	std::vector<int> ObjZIdx_Start(startVOL, startVOL + gpuNum);
	std::vector<int> ObjZIdx_End(ObjZIdx_Start.size());
	std::copy(ObjZIdx_Start.begin() + 1, ObjZIdx_Start.end(), ObjZIdx_End.begin());
	ObjZIdx_End[gpuNum - 1] = ZN;
	std::vector<int> prjIdx_Start(gpuNum);
	std::vector<int> prjIdx_End(gpuNum);
	const float objCntIdxZ = (ZN - 1.0f) * 0.5 - imgZCenter / dz;
	const float detStpZ = (zds[DNV - 1] - zds[0]) / (DNV - 1.0f); // detector cell height
	const float detCntIdxV = -zds[0] / detStpZ; // Detector Center along Z direction
	std::vector<int> SZN = ObjZIdx_End - ObjZIdx_Start; // sub volume slices number
	std::vector<float> subImgZCenter(gpuNum,0.0f);
	std::vector<int> SPN(gpuNum);
	const float objCntIdxX = (XN - 1.0f) * 0.5f - imgXCenter / dx;
	const float objCntIdxY = (YN - 1.0f) * 0.5f - imgYCenter / dx;
	std::vector<float3> sour(gpuNum);
	thrust::host_vector<thrust::device_vector<byte> > msk(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > vol(gpuNum);
	thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
	thrust::host_vector<hipArray*> d_prjArray(gpuNum);
	thrust::host_vector<hipTextureObject_t> texObj(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > prjSAT(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
	thrust::host_vector<hipStream_t> stream(gpuNum);
	const float4 detParas = calDetParas(xds, yds, zds, x0, y0, z0, DNU, DNV);
	const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
	// Pre-calculate the per-view (cos, sin, z-shift) triples on one device.
	thrust::device_vector<float3> COSSINZT(PN);
	thrust::device_vector<float> ANGS = hangs;
	thrust::device_vector<float> ZPOS = hzPos;
	thrust::transform(
		thrust::make_zip_iterator(thrust::make_tuple(ANGS.begin(), ZPOS.begin())),
		thrust::make_zip_iterator(thrust::make_tuple(ANGS.end(), ZPOS.end())),
		COSSINZT.begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
	dim3 copyBlk(64,16,1);
	thrust::host_vector<dim3> copyGid(gpuNum);
	dim3 blk(BACK_BLKX, BACK_BLKY, BACK_BLKZ);
	thrust::host_vector<dim3> gid(gpuNum);
	dim3 vertGenBlk(512,1,1);
	thrust::host_vector<dim3> vertGenGid(gpuNum);
	dim3 horzGenBlk(64,16,1);
	thrust::host_vector<dim3> horzGenGid(gpuNum);
	hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
	thrust::host_vector<thrust::host_vector<float> > subVol(gpuNum);
	std::vector<size_t> siz(gpuNum,0);
	std::vector<size_t> nsiz(gpuNum,0);
	omp_set_num_threads(gpuNum);
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		// Find which projection views influence this Z slab.
		getPrjIdxPair<float>(hzPos, ObjZIdx_Start[i], ObjZIdx_End[i],
			objCntIdxZ, dz, ZN, detCntIdxV, detStpZ, DNV,
			prjIdx_Start[i], prjIdx_End[i]);
		SPN[i] = prjIdx_End[i] - prjIdx_Start[i];
		// Center position index of the sub volume along Z.
		subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjZIdx_Start[i] - 0.5f; // index position
		hipSetDevice(i);
		hipStreamCreate(&stream[i]);
		// Generate the SAT for the projection data.
		siz[i] = DNU * DNV * SPN[i];
		nsiz[i] = (DNU + 1) * (DNV + 1) * SPN[i];
		prjSAT[i].resize(nsiz[i]);
		prj[i].resize(siz[i]);
		thrust::copy(
			hprj + DNU * DNV * prjIdx_Start[i],
			hprj + DNU * DNV * prjIdx_End[i],
			prj[i].begin());
		copyGid[i].x = (DNV + copyBlk.x - 1) / copyBlk.x;
		copyGid[i].y = (DNU + copyBlk.y - 1) / copyBlk.y;
		copyGid[i].z = (SPN[i] + copyBlk.z - 1) / copyBlk.z;
		hipLaunchKernelGGL(( addOneSidedZeroBoarder), dim3(copyGid[i]), dim3(copyBlk), 0, stream[i],
			thrust::raw_pointer_cast(&prj[i][0]),
			thrust::raw_pointer_cast(&prjSAT[i][0]),
			DNU, DNV, SPN[i]);
		// BUG FIX: the grid size must be divided by vertGenBlk.x (512), the
		// block size actually used for verticalIntegral2 -- not by copyBlk.x
		// (64). The old divisor launched ~8x more blocks than needed
		// (harmless for the result thanks to the in-kernel guard, but wasteful).
		vertGenGid[i].x = (nDNU * SPN[i] + vertGenBlk.x - 1) / vertGenBlk.x;
		vertGenGid[i].y = 1;
		vertGenGid[i].z = 1;
		verticalIntegral2 << <vertGenGid[i], vertGenBlk, 0, stream[i] >> >(
			thrust::raw_pointer_cast(&prjSAT[i][0]), nDNV, nDNU * SPN[i]);
		horzGenGid[i].x = (nDNV + horzGenBlk.x - 1) / horzGenBlk.x;
		horzGenGid[i].y = (SPN[i] + horzGenBlk.y - 1) / horzGenBlk.y;
		horzGenGid[i].z = 1;
		heorizontalIntegral2 << <horzGenGid[i], horzGenBlk,0,stream[i] >> >(
			thrust::raw_pointer_cast(&prjSAT[i][0]), nDNU, nDNV, SPN[i]);
		prj[i].clear();
		// Upload the SAT into a 3D array and bind a texture object to it.
		hipExtent prjSize;
		prjSize.width = DNV + 1;
		prjSize.height = DNU + 1;
		prjSize.depth = SPN[i];
		hipMalloc3DArray(&d_prjArray[i], &channelDesc, prjSize);
		hipMemcpy3DParms copyParams = { 0 };
		copyParams.srcPtr = make_hipPitchedPtr(
			(void*) thrust::raw_pointer_cast(&prjSAT[i][0]),
			prjSize.width * sizeof(float),
			prjSize.width, prjSize.height);
		copyParams.dstArray = d_prjArray[i];
		copyParams.extent = prjSize;
		copyParams.kind = hipMemcpyDeviceToDevice;
		hipMemcpy3DAsync(&copyParams, stream[i]);
		hipResourceDesc resDesc;
		memset(&resDesc, 0, sizeof(resDesc));
		resDesc.resType = hipResourceTypeArray;
		resDesc.res.array.array = d_prjArray[i];
		hipTextureDesc texDesc;
		memset(&texDesc, 0, sizeof(texDesc));
		texDesc.addressMode[0] = hipAddressModeClamp;
		texDesc.addressMode[1] = hipAddressModeClamp;
		texDesc.addressMode[2] = hipAddressModeClamp;
		texDesc.filterMode = hipFilterModeLinear;
		texDesc.readMode = hipReadModeElementType;
		texDesc.normalizedCoords = false;
		hipCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
		prjSAT[i].clear();
		// Prepare the backprojection launch configuration and inputs.
		gid[i].x = (SZN[i] + blk.x - 1) / blk.x;
		gid[i].y = (XN + blk.y - 1) / blk.y;
		gid[i].z = (YN + blk.z - 1) / blk.z;
		vol[i].resize(XN * YN * SZN[i]);
		msk[i].resize(XN * YN);
		thrust::copy(mask, mask + XN * YN, msk[i].begin());
		cossinZT[i].resize(SPN[i]);
		thrust::copy(
			COSSINZT.begin() + prjIdx_Start[i],
			COSSINZT.begin() + prjIdx_End[i],
			cossinZT[i].begin());
	}
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		hipSetDevice(i);
		DD3_gpu_back_ker<_BRANCHLESS> << <gid[i], blk, 0, stream[i] >> >(texObj[i],
			thrust::raw_pointer_cast(&vol[i][0]), thrust::raw_pointer_cast(&msk[i][0]),
			thrust::raw_pointer_cast(&cossinZT[i][0]), make_float3(x0, y0, z0), S2D,
			make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]), // have to be changed
			dx, dz, detParas.z, detParas.w, make_float2(detParas.x, detParas.y),
			make_int3(XN, YN, SZN[i]), SPN[i], 0);
	}
#pragma omp barrier
#pragma omp parallel for
	for(int i = 0 ;i < gpuNum; ++i)
	{
		hipSetDevice(i);
		// Copy the sub volume back and release the per-device resources.
		subVol[i].resize(XN * YN * SZN[i]);
		thrust::copy(vol[i].begin(), vol[i].end(), subVol[i].begin());
		vol[i].clear();
		msk[i].clear();
		cossinZT[i].clear();
		hipDestroyTextureObject(texObj[i]);
		hipFreeArray(d_prjArray[i]);
	}
	hipDeviceSynchronize();
	// Stitch the per-GPU slabs into the full output volume.
	combineVolume<float>(hvol, XN, YN, ZN, subVol, &(SZN[0]), gpuNum);
	hangs.clear();
	hzPos.clear();
	ObjZIdx_Start.clear();
	ObjZIdx_End.clear();
	prjIdx_Start.clear();
	prjIdx_End.clear();
	SZN.clear();
	subImgZCenter.clear();
	SPN.clear();
	sour.clear();
	msk.clear();
	vol.clear();
	cossinZT.clear();
	d_prjArray.clear();
	texObj.clear();
	prjSAT.clear();
	prj.clear();
	stream.clear();
	COSSINZT.clear();
	ANGS.clear();
	ZPOS.clear();
	copyGid.clear();
	gid.clear();
	vertGenGid.clear();
	horzGenGid.clear();
}
// Multi-GPU pseudo distance-driven backprojection. Same slab decomposition as
// DD3Back_branchless_sat2d_multiGPU, but the raw projection data is bound to
// the texture directly (border-clamped, linearly filtered) -- no summed-area
// table is needed.
void DD3Back_pseudo_multiGPU(
	float x0, float y0, float z0,
	int DNU, int DNV,
	float* xds, float* yds, float* zds,
	float imgXCenter, float imgYCenter, float imgZCenter,
	float* h_angs, float* h_zPos, int PN,
	int XN, int YN, int ZN,
	float* hvol, float* hprj,
	float dx, float dz,
	byte* mask,const int* startVOL, int gpuNum)
{
	thrust::host_vector<float> hangs(h_angs, h_angs + PN);
	thrust::host_vector<float> hzPos(h_zPos, h_zPos + PN);
	// Per-GPU slab boundaries along Z (last slab ends at ZN).
	std::vector<int> ObjZIdx_Start(startVOL, startVOL + gpuNum);
	std::vector<int> ObjZIdx_End(ObjZIdx_Start.size());
	std::copy(ObjZIdx_Start.begin() + 1, ObjZIdx_Start.end(), ObjZIdx_End.begin());
	ObjZIdx_End[gpuNum - 1] = ZN;
	std::vector<int> prjIdx_Start(gpuNum);
	std::vector<int> prjIdx_End(gpuNum);
	const float objCntIdxZ = (ZN - 1.0f) * 0.5 - imgZCenter / dz;
	const float detStpZ = (zds[DNV - 1] - zds[0]) / (DNV - 1.0f); // detector cell height
	const float detCntIdxV = -zds[0] / detStpZ; // Detector Center along Z direction
	std::vector<int> SZN = ObjZIdx_End - ObjZIdx_Start; // sub volume slices number
	std::vector<float> subImgZCenter(gpuNum,0.0f);
	std::vector<int> SPN(gpuNum);
	const float objCntIdxX = (XN - 1.0f) * 0.5f - imgXCenter / dx;
	const float objCntIdxY = (YN - 1.0f) * 0.5f - imgYCenter / dx;
	std::vector<float3> sour(gpuNum);
	thrust::host_vector<thrust::device_vector<byte> > msk(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > vol(gpuNum);
	thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
	thrust::host_vector<hipArray*> d_prjArray(gpuNum);
	thrust::host_vector<hipTextureObject_t> texObj(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
	thrust::host_vector<hipStream_t> stream(gpuNum);
	const float4 detParas = calDetParas(xds, yds, zds, x0, y0, z0, DNU, DNV);
	const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
	// Pre-calculate the per-view (cos, sin, z-shift) triples on one device.
	thrust::device_vector<float3> COSSINZT(PN);
	thrust::device_vector<float> ANGS = hangs;
	thrust::device_vector<float> ZPOS = hzPos;
	thrust::transform(
		thrust::make_zip_iterator(thrust::make_tuple(ANGS.begin(), ZPOS.begin())),
		thrust::make_zip_iterator(thrust::make_tuple(ANGS.end(), ZPOS.end())),
		COSSINZT.begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
	// (copyBlk/copyGid and the *Gen* launch configs are unused in this
	// pseudo-DD variant; kept for symmetry with the branchless variant.)
	dim3 copyBlk(64,16,1);
	thrust::host_vector<dim3> copyGid(gpuNum);
	dim3 blk(BACK_BLKX, BACK_BLKY, BACK_BLKZ);
	thrust::host_vector<dim3> gid(gpuNum);
	dim3 vertGenBlk(512,1,1);
	thrust::host_vector<dim3> vertGenGid(gpuNum);
	dim3 horzGenBlk(64,16,1);
	thrust::host_vector<dim3> horzGenGid(gpuNum);
	hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
	thrust::host_vector<thrust::host_vector<float> > subVol(gpuNum);
	std::vector<size_t> siz(gpuNum,0);
	std::vector<size_t> nsiz(gpuNum,0);
	omp_set_num_threads(gpuNum);
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		// Find which projection views influence this Z slab.
		getPrjIdxPair<float>(hzPos, ObjZIdx_Start[i], ObjZIdx_End[i],
			objCntIdxZ, dz, ZN, detCntIdxV, detStpZ, DNV,
			prjIdx_Start[i], prjIdx_End[i]);
		SPN[i] = prjIdx_End[i] - prjIdx_Start[i];
		// Center position index of the sub volume along Z.
		subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjZIdx_Start[i] - 0.5f; // index position
		hipSetDevice(i);
		hipStreamCreate(&stream[i]);
		// Upload this device's slice of the projection data into a 3D array
		// and bind a border-clamped, linearly filtered texture to it.
		siz[i] = DNU * DNV * SPN[i];
		prj[i].resize(siz[i]);
		thrust::copy(
			hprj + DNU * DNV * prjIdx_Start[i],
			hprj + DNU * DNV * prjIdx_End[i],
			prj[i].begin());
		hipExtent prjSize;
		prjSize.width = DNV;
		prjSize.height = DNU;
		prjSize.depth = SPN[i];
		hipMalloc3DArray(&d_prjArray[i], &channelDesc, prjSize);
		hipMemcpy3DParms copyParams = { 0 };
		copyParams.srcPtr = make_hipPitchedPtr(
			(void*) thrust::raw_pointer_cast(&prj[i][0]),
			prjSize.width * sizeof(float),
			prjSize.width, prjSize.height);
		copyParams.dstArray = d_prjArray[i];
		copyParams.extent = prjSize;
		copyParams.kind = hipMemcpyDeviceToDevice;
		hipMemcpy3DAsync(&copyParams, stream[i]);
		hipResourceDesc resDesc;
		memset(&resDesc, 0, sizeof(resDesc));
		resDesc.resType = hipResourceTypeArray;
		resDesc.res.array.array = d_prjArray[i];
		hipTextureDesc texDesc;
		memset(&texDesc, 0, sizeof(texDesc));
		texDesc.addressMode[0] = hipAddressModeBorder;
		texDesc.addressMode[1] = hipAddressModeBorder;
		texDesc.addressMode[2] = hipAddressModeBorder;
		texDesc.filterMode = hipFilterModeLinear;
		texDesc.readMode = hipReadModeElementType;
		texDesc.normalizedCoords = false;
		hipCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
		prj[i].clear();
		// Prepare the backprojection launch configuration and inputs.
		gid[i].x = (SZN[i] + blk.x - 1) / blk.x;
		gid[i].y = (XN + blk.y - 1) / blk.y;
		gid[i].z = (YN + blk.z - 1) / blk.z;
		vol[i].resize(XN * YN * SZN[i]);
		msk[i].resize(XN * YN);
		thrust::copy(mask, mask + XN * YN, msk[i].begin());
		cossinZT[i].resize(SPN[i]);
		thrust::copy(
			COSSINZT.begin() + prjIdx_Start[i],
			COSSINZT.begin() + prjIdx_End[i],
			cossinZT[i].begin());
	}
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		hipSetDevice(i);
		DD3_gpu_back_ker<_PSEUDODD> << <gid[i], blk, 0, stream[i] >> >(texObj[i],
			thrust::raw_pointer_cast(&vol[i][0]), thrust::raw_pointer_cast(&msk[i][0]),
			thrust::raw_pointer_cast(&cossinZT[i][0]), make_float3(x0, y0, z0), S2D,
			make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]), // have to be changed
			dx, dz, detParas.z, detParas.w, make_float2(detParas.x, detParas.y),
			make_int3(XN, YN, SZN[i]), SPN[i], 0);
	}
#pragma omp barrier
#pragma omp parallel for
	for (int i = 0; i < gpuNum; ++i)
	{
		// BUG FIX: make device i current before touching its resources.
		// The original loop omitted hipSetDevice(i) (the branchless variant
		// has it), so texture objects and arrays belonging to device i could
		// be destroyed while another device's context was current.
		hipSetDevice(i);
		// Copy the sub volume back and release the per-device resources.
		subVol[i].resize(XN * YN * SZN[i]);
		thrust::copy(vol[i].begin(), vol[i].end(), subVol[i].begin());
		vol[i].clear();
		msk[i].clear();
		cossinZT[i].clear();
		hipDestroyTextureObject(texObj[i]);
		hipFreeArray(d_prjArray[i]);
	}
	hipDeviceSynchronize();
	// Stitch the per-GPU slabs into the full output volume.
	combineVolume<float>(hvol, XN, YN, ZN, subVol, &(SZN[0]), gpuNum);
	hangs.clear();
	hzPos.clear();
	ObjZIdx_Start.clear();
	ObjZIdx_End.clear();
	prjIdx_Start.clear();
	prjIdx_End.clear();
	SZN.clear();
	subImgZCenter.clear();
	SPN.clear();
	sour.clear();
	msk.clear();
	vol.clear();
	cossinZT.clear();
	d_prjArray.clear();
	texObj.clear();
	prj.clear();
	stream.clear();
	COSSINZT.clear();
	ANGS.clear();
	ZPOS.clear();
	copyGid.clear();
	gid.clear();
	vertGenGid.clear();
	horzGenGid.clear();
}
extern "C"
void DD3Back_multiGPU(
	float x0, float y0, float z0,
	int DNU, int DNV,
	float* xds, float* yds, float* zds,
	float imgXCenter, float imgYCenter, float imgZCenter,
	float* hangs, float* hzPos, int PN,
	int XN, int YN, int ZN,
	float* hvol, float* hprj,
	float dx, float dz,
	byte* mask, int bakMode,const int* startVOL, int gpuNum)
{
	// Dispatch to the requested multi-GPU backprojection model.
	// bakMode == 0 selects the branchless (SAT-based) backprojector; any
	// other value falls back to the pseudo distance-driven backprojector.
	if (bakMode == 0)
	{
		DD3Back_branchless_sat2d_multiGPU(x0, y0, z0,
			DNU, DNV, xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj,
			dx, dz, mask, startVOL, gpuNum);
	}
	else
	{
		DD3Back_pseudo_multiGPU(x0, y0, z0,
			DNU, DNV, xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj,
			dx, dz, mask, startVOL, gpuNum);
	}
}
| c1adb969d3f357f84baba1bba6b52264564dad2e.cu |
#include "DD_MultiGPU_ker.h"
#include <cuda_runtime.h>
#include <vector>
#include <algorithm>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/find.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <omp.h>
// Thread-block dimensions for the backprojection kernel launches.
#define BACK_BLKX 64
#define BACK_BLKY 4
#define BACK_BLKZ 1
// Thread-block dimensions for the forward-projection kernel launches.
#define BLKX 32
#define BLKY 8
#define BLKZ 1
#ifndef __PI__
#define __PI__
// Common multiples/fractions of pi used by the projection geometry code.
#define PI 3.141592653589793
#define PI_2 1.570796326794897
#define PI_4 0.785398163397448
#define PI_3_4 2.356194490192344
#define PI_5_4 3.926990816987241
#define PI_7_4 5.497787143782138
#define TWOPI 6.283185307179586
#endif
// Toggle __forceinline__ on the small device helper functions below.
#define FORCEINLINE 1
#if FORCEINLINE
#define INLINE __forceinline__
#else
#define INLINE inline
#endif
#ifndef DEBUG
// Abort with file/line diagnostics when a CUDA runtime call fails.
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Same function as CUDA_CHECK_RETURN
#define CUDA_SAFE_CALL(call) do{ cudaError_t err = call; if (cudaSuccess != err) { fprintf (stderr, "Cuda error in file '%s' in line %i : %s.", __FILE__, __LINE__, cudaGetErrorString(err) ); exit(EXIT_FAILURE); } } while (0)
#else
// In DEBUG builds the checks degrade to bare calls (errors are ignored).
#define CUDA_CHECK_RETURN(value) {value;}
#define CUDA_SAFE_CALL(value) {value;}
#endif
// 8-bit mask element type used for the in-plane reconstruction mask.
typedef unsigned char byte;
#ifndef nullptr
#define nullptr NULL
#endif
// Component-wise quotient of a float2 by a scalar.
INLINE __host__ __device__ const float2 operator/(const float2& a, float b)
{
	return make_float2(a.x / b, a.y / b);
}
// Component-wise sum of two float3 values.
INLINE __host__ __device__ const float3 operator+(const float3& a, const float3& b)
{
	return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
// Component-wise difference of two float3 values.
INLINE __host__ __device__ const float3 operator-(const float3& a, const float3& b)
{
	return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
// Component-wise difference of two float2 values.
INLINE __host__ __device__ const float2 operator-(const float2& a, const float2& b)
{
	return make_float2(a.x - b.x, a.y - b.y);
}
// Component-wise (Hadamard) product of two float3 values.
INLINE __host__ __device__ const float3 operator*(const float3& a, const float3& b)
{
	return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);
}
// Scale a float3 by a scalar.
INLINE __host__ __device__ const float3 operator*(const float3& a, float b)
{
	return make_float3(a.x * b, a.y * b, a.z * b);
}
// Component-wise quotient of two float3 values.
INLINE __host__ __device__ const float3 operator/(const float3& a, const float3& b)
{
	return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);
}
// Component-wise quotient of a float3 by a scalar.
INLINE __host__ __device__ const float3 operator/(const float3& a, float b)
{
	return make_float3(a.x / b, a.y / b, a.z / b);
}
// Component-wise quotient of a double3 by a scalar (double-precision variant).
INLINE __host__ __device__ const double3 operator/(const double3& a, double b)
{
	return make_double3(a.x / b, a.y / b, a.z / b);
}
// Mixed-type difference: int3 minus float3, with implicit int->float promotion.
INLINE __host__ __device__ const float3 operator-(const int3& a, const float3& b)
{
	return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
// Euclidean norm of a float2.
INLINE __host__ __device__ float length(const float2& a)
{
	return sqrtf(a.x * a.x + a.y * a.y);
}
// Euclidean norm of a float3.
INLINE __host__ __device__ float length(const float3& a)
{
	return sqrtf(a.x * a.x + a.y * a.y + a.z * a.z);
}
// Euclidean norm of a double3 (double-precision sqrt).
INLINE __host__ __device__ double length(const double3& a)
{
	return sqrt(a.x * a.x + a.y * a.y + a.z * a.z);
}
// Unit-length copy of a float2 (undefined for the zero vector).
INLINE __host__ __device__ const float2 normalize(const float2& a)
{
	return a / length(a);
}
// Unit-length copy of a float3 (undefined for the zero vector).
INLINE __host__ __device__ const float3 normalize(const float3& a)
{
	return a / length(a);
}
// Unit-length copy of a double3 (undefined for the zero vector).
INLINE __host__ __device__ const double3 normalize(const double3& a)
{
	return a / length(a);
}
// Smallest component of a float2 (horizontal min).
INLINE __host__ __device__ float fminf(const float2& a)
{
	return fminf(a.x, a.y);
}
// Smallest component of a float3 (horizontal min).
INLINE __host__ __device__ float fminf(const float3& a)
{
	return fminf(a.x, fminf(a.y, a.z));
}
// Largest component of a float2 (horizontal max).
INLINE __host__ __device__ float fmaxf(const float2& a)
{
	return fmaxf(a.x, a.y);
}
// Largest component of a float3 (horizontal max).
INLINE __host__ __device__ float fmaxf(const float3& a)
{
	return fmaxf(a.x, fmaxf(a.y, a.z));
}
// Component-wise minimum of two float3 values.
INLINE __host__ __device__ const float3 fminf(const float3& a, const float3& b)
{
	return make_float3(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z));
}
// Component-wise maximum of two float3 values.
INLINE __host__ __device__ const float3 fmaxf(const float3& a, const float3& b)
{
	return make_float3(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z));
}
// Component-wise minimum of two float2 values.
INLINE __host__ __device__ const float2 fminf(const float2& a, const float2& b)
{
	return make_float2(fminf(a.x, b.x), fminf(a.y, b.y));
}
// Component-wise maximum of two float2 values.
INLINE __host__ __device__ const float2 fmaxf(const float2& a, const float2& b)
{
	return make_float2(fmaxf(a.x, b.x), fmaxf(a.y, b.y));
}
// Ray / axis-aligned-box intersection via the slab method.
// sour: ray origin; dir: ray direction (components must be non-zero, a zero
// component produces inf/nan slab parameters); boxmin/boxmax: box corners.
// On return *tnear / *tfar hold the entry / exit parameters along dir;
// returns true iff the ray actually crosses the box (tfar > tnear).
INLINE __host__ __device__ bool intersectBox(
	const float3& sour,
	const float3& dir,
	const float3& boxmin,
	const float3& boxmax,
	float* tnear, float* tfar)
{
	// FIX: use 1.0f literals -- the original `1.0 / dir.x` promoted the
	// division to double and narrowed back to float inside hot kernels.
	const float3 invR = make_float3(1.0f / dir.x, 1.0f / dir.y, 1.0f / dir.z);
	const float3 tbot = invR * (boxmin - sour);
	const float3 ttop = invR * (boxmax - sour);
	// Per-axis near/far slab parameters, then the tightest interval.
	const float3 tmin = fminf(ttop, tbot);
	const float3 tmax = fmaxf(ttop, tbot);
	const float largest_tmin = fmaxf(tmin);
	const float smallest_tmax = fminf(tmax);
	*tnear = largest_tmin;
	*tfar = smallest_tmax;
	return smallest_tmax > largest_tmin;
}
// Wrap an arbitrary angle onto [0, 2*pi) by repeated subtraction/addition
// of 2*pi (loop form keeps exact parity with the original semantics for
// values far outside the range).
template<typename T>
INLINE __host__ __device__ T regularizeAngle(T curang)
{
	T angle = curang;
	while (angle >= TWOPI)
	{
		angle -= TWOPI;
	}
	while (angle < 0)
	{
		angle += TWOPI;
	}
	return angle;
}
// Transform a voxel position into the (rotated) source-local frame:
// apply the inverse in-plane rotation given by cossinT = (cosT, sinT)
// and subtract the source z offset zP. Result written to virVox.
INLINE __host__ __device__ void invRotVox(
	const float3& curVox,
	float3& virVox,
	const float2& cossinT,
	const float zP)
{
	virVox.x = curVox.x * cossinT.x + curVox.y * cossinT.y;
	virVox.y =-curVox.x * cossinT.y + curVox.y * cossinT.x;
	virVox.z = curVox.z - zP;
}
// Value-returning variant of invRotVox(): inverse in-plane rotation by the
// angle encoded in cossin = (cosT, sinT), plus z shift by -zP.
// BUG FIX: the original assigned the rotated y component to outV.x a second
// time, leaving outV.y uninitialized; invRotVox() shows the intended formula.
INLINE __device__ float3 invRot(
	const float3 inV,
	const float2 cossin,
	const float zP)
{
	float3 outV;
	outV.x = inV.x * cossin.x + inV.y * cossin.y;
	outV.y =-inV.x * cossin.y + inV.y * cossin.x;
	outV.z = inV.z - zP;
	return outV;
}
namespace CTMBIR
{
	// Functor mapping an (angle, source-z) tuple to (cos, sin, zPos), used to
	// precompute per-view rotation data for the backprojection kernels.
	// x0/y0/z0 (initial source position) are stored but not read by
	// operator(); kept for interface compatibility with existing callers.
	struct ConstantForBackProjection4{
		float x0;
		float y0;
		float z0;
		typedef thrust::tuple<float, float> InTuple;
		ConstantForBackProjection4(const float _x0, const float _y0, const float _z0)
		: x0(_x0), y0(_y0), z0(_z0){}
		// Returns make_float3(cos(ang), sin(ang), zPos) with ang wrapped to [0, 2*pi).
		__device__ float3 operator()(const InTuple& tp)
		{
			float curang = regularizeAngle(thrust::get<0>(tp));
			float zP = thrust::get<1>(tp);
			float cosT = cosf(curang);
			float sinT = sinf(curang);
			return make_float3(cosT, sinT, zP);
		}
	};
}
// Compute nrBoundaries cell-boundary positions from (nrBoundaries - 1) cell
// centers: linear extrapolation at both ends, midpoints in between.
// With fewer than 3 boundaries the single center is expanded by +/- 0.5.
template<typename T>
void DD3Boundaries(int nrBoundaries, T*pCenters, T *pBoundaries)
{
	if (nrBoundaries >= 3)
	{
		// Extrapolate the first boundary past the first center.
		pBoundaries[0] = 1.5 * pCenters[0] - 0.5 * pCenters[1];
		// Interior boundaries sit halfway between adjacent centers.
		for (int idx = 1; idx <= nrBoundaries - 2; ++idx)
		{
			pBoundaries[idx] = 0.5 * pCenters[idx - 1] + 0.5 * pCenters[idx];
		}
		// Extrapolate the last boundary past the last center.
		pBoundaries[nrBoundaries - 1] =
			1.5 * pCenters[nrBoundaries - 2] - 0.5 * pCenters[nrBoundaries - 3];
	}
	else
	{
		pBoundaries[0] = pCenters[0] - 0.5;
		pBoundaries[1] = pCenters[0] + 0.5;
	}
}
// Overload taking the centers in a std::vector.
// BUG FIX: the original dereferenced and incremented the vector object itself
// (`*pCenters`, `pCenters++`), which does not compile once the template is
// instantiated; work on a raw pointer to the first element instead, exactly
// like the sibling overloads below do.
template<typename T>
void DD3Boundaries(int nrBoundaries, std::vector<T>& pCenters, T *pBoundaries)
{
	T* pC = &pCenters[0];
	int i;
	if (nrBoundaries >= 3)
	{
		*pBoundaries++ = 1.5 * *pC - 0.5 * *(pC + 1);
		for (i = 1; i <= (nrBoundaries - 2); i++)
		{
			*pBoundaries++ = 0.5 * *pC + 0.5 * *(pC + 1);
			pC++;
		}
		*pBoundaries = 1.5 * *pC - 0.5 * *(pC - 1);
	}
	else
	{
		*pBoundaries = *pC - 0.5;
		*(pBoundaries + 1) = *pC + 0.5;
	}
}
// Overload writing the boundaries into a pre-sized std::vector; same
// boundary rule as the raw-pointer version (extrapolated ends, midpoints
// in between; +/- 0.5 expansion when fewer than 3 boundaries).
template<typename T>
void DD3Boundaries(int nrBoundaries,T *pCenters, std::vector<T>& pB)
{
	T* pBoundaries = &pB[0];
	if (nrBoundaries >= 3)
	{
		pBoundaries[0] = 1.5 * pCenters[0] - 0.5 * pCenters[1];
		for (int idx = 1; idx <= nrBoundaries - 2; ++idx)
		{
			pBoundaries[idx] = 0.5 * pCenters[idx - 1] + 0.5 * pCenters[idx];
		}
		pBoundaries[nrBoundaries - 1] =
			1.5 * pCenters[nrBoundaries - 2] - 0.5 * pCenters[nrBoundaries - 3];
	}
	else
	{
		pBoundaries[0] = pCenters[0] - 0.5;
		pBoundaries[1] = pCenters[0] + 0.5;
	}
}
// Overload with both centers and boundaries in std::vector containers;
// identical boundary rule to the raw-pointer overloads.
template<typename T>
void DD3Boundaries(int nrBoundaries,std::vector<T>& pC, std::vector<T>& pB)
{
	T* pCenters = &pC[0];
	T* pBoundaries = &pB[0];
	if (nrBoundaries >= 3)
	{
		pBoundaries[0] = 1.5 * pCenters[0] - 0.5 * pCenters[1];
		for (int idx = 1; idx <= nrBoundaries - 2; ++idx)
		{
			pBoundaries[idx] = 0.5 * pCenters[idx - 1] + 0.5 * pCenters[idx];
		}
		pBoundaries[nrBoundaries - 1] =
			1.5 * pCenters[nrBoundaries - 2] - 0.5 * pCenters[nrBoundaries - 3];
	}
	else
	{
		pBoundaries[0] = pCenters[0] - 0.5;
		pBoundaries[1] = pCenters[0] + 0.5;
	}
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////
// Get one sub-volume from the whole volume.
// Assume that the volumes are stored in Z, X, Y order
// Extract slices [ZIdx_Start, ZIdx_End) of a volume stored in Z-fastest
// (Z, X, Y) order into subVol, which uses the same layout with
// SZN = ZIdx_End - ZIdx_Start slices.
template<typename T>
void getSubVolume(const T* vol,
	const size_t XN, const size_t YN, const size_t ZN,
	const size_t ZIdx_Start, const size_t ZIdx_End, T* subVol)
{
	const size_t SZN = ZIdx_End - ZIdx_Start;
	// (y, x) pairs collapse to one linear in-plane index.
	for (size_t xy = 0; xy < XN * YN; ++xy)
	{
		const T* src = vol + xy * ZN + ZIdx_Start;
		T* dst = subVol + xy * SZN;
		for (size_t k = 0; k < SZN; ++k)
		{
			dst[k] = src[k];
		}
	}
}
// Same slice extraction as the (XN, YN, ZN) overload, but with the in-plane
// dimensions already collapsed into a single count XYN = XN * YN.
template<typename T>
void getSubVolume(const T* vol,
	const size_t XYN, const size_t ZN,
	const size_t ZIdx_Start, const size_t ZIdx_End, T* subVol)
{
	const size_t SZN = ZIdx_End - ZIdx_Start;
	for (size_t xyIdx = 0; xyIdx < XYN; ++xyIdx)
	{
		for (size_t k = 0; k < SZN; ++k)
		{
			subVol[xyIdx * SZN + k] = vol[xyIdx * ZN + ZIdx_Start + k];
		}
	}
}
///////////////////////////////////////////////////////////////////////////////////
// For projection, before we divide the volume into serveral sub-volumes, we have
// to calculate the Z index range
// Given the source z positions of views [PrjIdx_Start, PrjIdx_End)
// (increasing, spiral scan), compute the half-open slice range
// [ObjIdx_Start, ObjIdx_End) of the volume that those views can illuminate,
// padded by roughly one slice on each side and clamped to [0, ZN].
template<typename T>
void getVolZIdxPair(const thrust::host_vector<T>& zPos, // Z position of the source.
	//NOTE: We only assume the spiral CT case that zPos is increasing.
	const size_t PrjIdx_Start, const size_t PrjIdx_End,
	const T detCntIdxV, const T detStpZ, const int DNV,
	const T objCntIdxZ, const T dz, const int ZN, // Size of the volume
	int& ObjIdx_Start, int& ObjIdx_End) // The end is not included
{
	// Detector extent below / above the central row, in physical z units.
	const T lowerPart = (detCntIdxV + 0.5) * detStpZ;
	const T upperPart = DNV * detStpZ - lowerPart;
	// Physical z span swept by the first and last selected views.
	const T startPos = zPos[PrjIdx_Start] - lowerPart;
	const T endPos = zPos[PrjIdx_End - 1] + upperPart;
	// Convert to slice indices with one-slice safety margins...
	ObjIdx_Start = floor((startPos / dz) + objCntIdxZ - 1);
	ObjIdx_End = ceil((endPos / dz) + objCntIdxZ + 1) + 1;
	// ...then clamp both ends into [0, ZN].
	ObjIdx_Start = (ObjIdx_Start < 0) ? 0 : ObjIdx_Start;
	ObjIdx_Start = (ObjIdx_Start > ZN) ? ZN : ObjIdx_Start;
	ObjIdx_End = (ObjIdx_End < 0) ? 0 : ObjIdx_End;
	ObjIdx_End = (ObjIdx_End > ZN) ? ZN : ObjIdx_End;
}
///////////////////////////////////////////////////////////////////////////////////
// For backprojection, after decide the subvolume range, we have to decide the
// projection range to cover the subvolume.
// Inverse of getVolZIdxPair: given a sub-volume slice range
// [ObjZIdx_Start, ObjZIdx_End), find the half-open range of view indices
// [prjIdx_Start, prjIdx_End) whose detector can see that slab, with a small
// safety margin, clamped to [0, PN].
template<typename T>
void getPrjIdxPair(const thrust::host_vector<T>& zPos, // Z Position of the source.
	// NOTE: we assume that it is pre sorted
	const size_t ObjZIdx_Start, const size_t ObjZIdx_End, // sub vol range,
	// NOTE: the objZIdx_End is not included
	const T objCntIdxZ, const T dz, const int ZN,
	const T detCntIdxV, const T detStpZ, const int DNV,
	int& prjIdx_Start, int& prjIdx_End)
{
	const int PN = zPos.size();
	// Physical z extent of the sub volume.
	const T lowerPartV = (ObjZIdx_Start - objCntIdxZ - 0.5) * dz;
	const T highrPartV = lowerPartV + (ObjZIdx_End - ObjZIdx_Start) * dz;
	// Detector extent below / above the central row.
	const T lowerPartDet = (detCntIdxV + 0.5) * detStpZ;
	const T upperPartDet = DNV * detStpZ - lowerPartDet;
	//The source position range that can still cover the slab.
	const T sourLPos = lowerPartV - upperPartDet;
	const T sourHPos = highrPartV + lowerPartDet;
	// Binary-search the sorted source positions; widen by 1 view below and
	// 2 views above as a safety margin.
	prjIdx_Start = thrust::upper_bound(zPos.begin(),zPos.end(),sourLPos) - zPos.begin() - 1;
	prjIdx_End = thrust::upper_bound(zPos.begin(),zPos.end(),sourHPos) - zPos.begin() + 2;
	prjIdx_Start = (prjIdx_Start < 0) ? 0 : prjIdx_Start;
	prjIdx_Start = (prjIdx_Start > PN)? PN: prjIdx_Start;
	prjIdx_End = (prjIdx_End < 0) ? 0 : prjIdx_End;
	prjIdx_End = (prjIdx_End > PN) ? PN : prjIdx_End;
}
////////////////////////////////////////////////////////////////////////////////////
// The volume is also stored in Z, X, Y order
// Not tested yet.
// Stitch subVolNum sub volumes (each XN*YN*SZN[v], Z-fastest layout) back
// into vol (XN*YN*ZN), concatenating slices in sub-volume order. For a full
// fill, ZN must equal the sum of SZN[0..subVolNum-1].
template<typename T>
void combineVolume(
	T* vol, // The volume to be combined
	const int XN, const int YN, const int ZN,
	T** subVol, // All sub volumes
	const int* SZN, // Number of slices for each subVolume
	const int subVolNum) // Number of sub volumes
{
	for (int xy = 0; xy < XN * YN; ++xy)
	{
		int zOut = 0;
		for (int v = 0; v < subVolNum; ++v)
		{
			for (int z = 0; z < SZN[v]; ++z)
			{
				vol[xy * ZN + zOut] = subVol[v][xy * SZN[v] + z];
				++zOut;
			}
		}
	}
}
// Same stitching as the raw-pointer overload, but with the sub volumes held
// in thrust::host_vector containers.
template<typename T>
void combineVolume(
	T* vol, // The volume to be combined
	const int XN, const int YN, const int ZN,
	thrust::host_vector<thrust::host_vector<float> >& subVol, // All sub volumes
	const int* SZN, // Number of slices for each subVolume
	const int subVolNum) // Number of sub volumes
{
	for (int xy = 0; xy < XN * YN; ++xy)
	{
		int zOut = 0;
		for (int v = 0; v < subVolNum; ++v)
		{
			for (int z = 0; z < SZN[v]; ++z)
			{
				vol[xy * ZN + zOut] = subVol[v][xy * SZN[v] + z];
				++zOut;
			}
		}
	}
}
// Element-wise difference of two equally sized vectors: res[i] = a[i] - b[i].
// (Caller must guarantee b has at least a.size() elements.)
template<typename T>
std::vector<T> operator-(const std::vector<T>& a, const std::vector<T>& b)
{
	std::vector<T> res(a.size(), 0);
	for (size_t k = 0; k != a.size(); ++k)
	{
		res[k] = a[k] - b[k];
	}
	return res;
}
// Length of the overlap between intervals [fixedmin, fixedmax] and
// [varimin, varimax]; 0 when they do not overlap. Kept branch-free (multiply
// by the comparison result) for GPU warp coherence.
// FIX: the original wrote abs(right - left) * static_cast<double>(right > left),
// which promoted the whole expression to double in float kernels and relied on
// an abs() overload that is integer-only on the host side. When right > left
// the difference is already non-negative, so neither abs() nor the double
// promotion is needed.
template<typename T>
__device__ inline T intersectLength(const T& fixedmin, const T& fixedmax, const T& varimin, const T& varimax)
{
	const T left = (fixedmin > varimin) ? fixedmin : varimin;
	const T right = (fixedmax < varimax) ? fixedmax : varimax;
	return (right - left) * static_cast<T>(right > left);
}
// Scatter one Z-fastest volume (dims ZN x XN x YN) into two padded buffers
// used for summed-area-table construction:
//   out_ZXY[(y*(XN+1) + x+1)*(ZN+1) + z+1] = in[(y*XN + x)*ZN + z]
//   out_ZYX[(x*(YN+1) + y+1)*(ZN+1) + z+1] = in[(y*XN + x)*ZN + z]  (X/Y swapped)
// The zero-index border planes are not written here -- presumably the caller
// zero-initializes the padded buffers so the later prefix sums start from 0
// (verify at the call site).
template<typename Ta, typename Tb>
__global__ void naive_copyToTwoVolumes(Ta* in_ZXY,
	Tb* out_ZXY, Tb* out_ZYX,
	int XN, int YN, int ZN)
{
	// Thread mapping: x -> z (fastest axis), y -> x, z -> y.
	int idz = threadIdx.x + blockIdx.x * blockDim.x;
	int idx = threadIdx.y + blockIdx.y * blockDim.y;
	int idy = threadIdx.z + blockIdx.z * blockDim.z;
	if (idx < XN && idy < YN && idz < ZN)
	{
		int i = (idy * XN + idx) * ZN + idz;
		int ni = (idy * (XN + 1) + (idx + 1)) * (ZN + 1) + idz + 1;
		int nj = (idx * (YN + 1) + (idy + 1)) * (ZN + 1) + idz + 1;
		out_ZXY[ni] = in_ZXY[i];
		out_ZYX[nj] = in_ZXY[i];
	}
}
// Out-of-place inclusive prefix sum over the slower axis (i = 0..N-1) for
// each of the ZN fastest-axis positions; one thread per z index.
// (The "herizontal" spelling is kept because callers reference this name.)
template<typename Ta, typename Tb>
__global__ void naive_herizontalIntegral(Ta* in, Tb* out, int N, int ZN)
{
	int zi = threadIdx.x + blockIdx.x * blockDim.x;
	if (zi < ZN)
	{
		out[zi] = in[zi];
		for (int i = 1; i < N; ++i)
		{
			out[i * ZN + zi] = out[(i - 1) * ZN + zi]
				+ in[i * ZN + zi];
		}
	}
}
// Out-of-place inclusive prefix sum along the fastest axis (length ZN) for
// each of the N columns; one thread per column.
template<typename Ta, typename Tb>
__global__ void naive_verticalIntegral(Ta* in, Tb* out, int N, int ZN)
{
	int xyi = threadIdx.x + blockIdx.x * blockDim.x;
	if (xyi < N)
	{
		out[xyi * ZN] = in[xyi * ZN];
		for (int ii = 1; ii < ZN; ++ii)
		{
			out[xyi * ZN + ii] = out[xyi * ZN + ii - 1]
				+ in[xyi * ZN + ii];
		}
	}
}
// In-place inclusive prefix sum along the fastest axis (length ZN) for each
// of the N columns of prj; one thread per column.
template<typename T>
__global__ void verticalIntegral(T* prj, int ZN, int N)
{
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx < N)
	{
		int currentHead = idx * ZN;
		for (int ii = 1; ii < ZN; ++ii)
		{
			prj[currentHead + ii] = prj[currentHead + ii] + prj[currentHead + ii - 1];
		}
	}
}
// In-place inclusive prefix sum across the DNU detector columns of each view
// (layout (PN, DNU, DNV)); one thread per (detector row, view) pair.
template<typename T>
__global__ void horizontalIntegral(T* prj, int DNU, int DNV, int PN)
{
	int idv = threadIdx.x + blockIdx.x * blockDim.x;
	int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
	if (idv < DNV && pIdx < PN)
	{
		int headPtr = pIdx * DNU * DNV + idv;
		for (int ii = 1; ii < DNU; ++ii)
		{
			prj[headPtr + ii * DNV] = prj[headPtr + ii * DNV] + prj[headPtr + (ii - 1) * DNV];
		}
	}
}
// Out-of-place inclusive prefix sum along Z of a double volume, each result
// stored as an int2 holding the lo/hi words of the double (for later
// __hiloint2double reconstruction from texture fetches).
// BUG FIX: the original seeded `temp` with in[xyi*ZN] and then started the
// accumulation loop at ii = 0, so the first element was added twice and every
// output carried an extra in[xyi*ZN]; the sibling *verticalIntegral kernels
// show the intended inclusive prefix sum. (The "vertial" spelling is kept
// because callers reference this name.)
__global__ void naive_vertialIntegral(double* in, int2* out, int N, int ZN)
{
	int xyi = threadIdx.x + blockIdx.x * blockDim.x;
	if (xyi < N)
	{
		double acc = in[xyi * ZN];
		out[xyi * ZN] = make_int2(__double2loint(acc), __double2hiint(acc));
		for (int ii = 1; ii < ZN; ++ii)
		{
			acc = acc + in[xyi * ZN + ii];
			out[xyi * ZN + ii] = make_int2(__double2loint(acc), __double2hiint(acc));
		}
	}
}
// Non-template float overload of verticalIntegral: in-place inclusive prefix
// sum along the fastest axis, one thread per column. Overload resolution
// prefers this over the template for float* arguments; behavior is identical.
__global__ void verticalIntegral(float* prj, int ZN, int N)
{
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx < N)
	{
		int currentHead = idx * ZN;
		for (int ii = 1; ii < ZN; ++ii)
		{
			prj[currentHead + ii] = prj[currentHead + ii] + prj[currentHead + ii - 1];
		}
	}
}
// Non-template float overload of horizontalIntegral: in-place inclusive
// prefix sum across the DNU detector columns of each view; one thread per
// (detector row, view) pair. Identical to the template version.
__global__ void horizontalIntegral(float* prj, int DNU, int DNV, int PN)
{
	int idv = threadIdx.x + blockIdx.x * blockDim.x;
	int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
	if (idv < DNV && pIdx < PN)
	{
		int headPrt = pIdx * DNU * DNV + idv;
		for (int ii = 1; ii < DNU; ++ii)
		{
			prj[headPrt + ii * DNV] = prj[headPrt + ii * DNV] + prj[headPrt + (ii - 1) * DNV];
		}
	}
}
// Branchless distance-driven projection using two summed-area-table (SAT)
// textures. Thread mapping: x -> detector row (DNV), y -> detector column
// (DNU), z -> view (PN); blockDim must be (BLKX, BLKY, BLKZ) so the shared
// tiles below match BLKY. volTex1/volTex2 are the SAT volumes integrated in
// the (Z,X) and (Z,Y) layouts; bxds/byds/bzds are the DNU+1/DNV+1 detector
// cell boundary positions; proj is the output sinogram laid out (PN,DNU,DNV).
__global__ void DD3_gpu_proj_branchless_sat2d_ker(
	cudaTextureObject_t volTex1,
	cudaTextureObject_t volTex2,
	float* proj,
	float3 s,
	const float3* __restrict__ cossinZT,
	const float* __restrict__ xds,
	const float* __restrict__ yds,
	const float* __restrict__ zds,
	const float* __restrict__ bxds,
	const float* __restrict__ byds,
	const float* __restrict__ bzds,
	float3 objCntIdx,
	float dx, float dz,
	int XN, int YN, int ZN,
	int DNU, int DNV, int PN)
{
	int detIdV = threadIdx.x + blockIdx.x * blockDim.x;
	int detIdU = threadIdx.y + blockIdx.y * blockDim.y;
	int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
	__shared__ float _xds[BLKY];
	__shared__ float _yds[BLKY];
	// FIX: the grid is rounded up, so threads with detIdU >= DNU used to read
	// xds/yds out of bounds while staging the shared tile. Clamp the index:
	// clamped values are only ever consumed by in-range threads. (All threads
	// with the same threadIdx.y write the same value -- benign, as before.)
	int ldU = (detIdU < DNU) ? detIdU : (DNU - 1);
	_xds[threadIdx.y] = xds[ldU];
	_yds[threadIdx.y] = yds[ldU];
	__syncthreads();
	if (detIdU < DNU && detIdV < DNV && angIdx < PN)
	{
		// Rotate the source into the current view frame.
		float3 dir = cossinZT[angIdx];
		float3 cursour = make_float3(
			s.x * dir.x - s.y * dir.y,
			s.x * dir.y + s.y * dir.x,
			s.z + dir.z);
		s = cossinZT[angIdx];
		// Rotated detector-cell center (reusing summ/obj as scratch).
		float summ = _xds[threadIdx.y] * s.x - _yds[threadIdx.y] * s.y;
		float obj = _xds[threadIdx.y] * s.y + _yds[threadIdx.y] * s.x;
		// Rotated detector-cell corner positions.
		float realL = bxds[detIdU];
		float realR = byds[detIdU];
		float realU = bxds[detIdU + 1];
		float realD = byds[detIdU + 1];
		float2 curDetL = make_float2(
			realL * s.x - realR * s.y,
			realL * s.y + realR * s.x);
		float2 curDetR = make_float2(
			realU * s.x - realD * s.y,
			realU * s.y + realD * s.x);
		float4 curDet = make_float4(
			summ, obj, bzds[detIdV] + s.z,
			bzds[detIdV + 1] + s.z);
		dir = normalize(make_float3(summ, obj,
			zds[detIdV] + s.z) - cursour);
		summ = 0;
		obj = 0;
		float intersectLength, intersectHeight;
		// FIX: 1.0f literals (originals were double-precision 1.0 divisions).
		float invdz = 1.0f / dz;
		float invdx = 1.0f / dx;
		float factL(1.0f);
		float factR(1.0f);
		float factU(1.0f);
		float factD(1.0f);
		float constVal = 0;
		// Integrate along the dominant in-plane axis; within a block angIdx is
		// uniform (blockDim.z == 1), so this branch is block-uniform.
		if (fabsf(s.x) <= fabsf(s.y))
		{
			summ = 0;
			factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x);
			factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x);
			factU = (curDet.w - cursour.z) / (curDet.x - cursour.x);
			factD = (curDet.z - cursour.z) / (curDet.x - cursour.x);
			constVal = dx * dx * dz / fabsf(dir.x);
#pragma unroll
			for (int ii = 0; ii < XN; ++ii)
			{
				obj = (ii - objCntIdx.x) * dx;
				realL = (obj - curDetL.x) * factL + curDetL.y;
				realR = (obj - curDetR.x) * factR + curDetR.y;
				realU = (obj - curDet.x) * factU + curDet.w;
				realD = (obj - curDet.x) * factD + curDet.z;
				intersectLength = realR - realL;
				intersectHeight = realU - realD;
				realD = realD * invdz + objCntIdx.z + 1;
				realR = realR * invdx + objCntIdx.y + 1;
				realU = realU * invdz + objCntIdx.z + 1;
				realL = realL * invdx + objCntIdx.y + 1;
				// 4-corner SAT fetch gives the box integral over the footprint.
				summ +=
					(
					tex3D<float>(volTex2, realD, realL, ii + 0.5)
					+ tex3D<float>(volTex2, realU, realR, ii + 0.5)
					- tex3D<float>(volTex2, realU, realL, ii + 0.5)
					- tex3D<float>(volTex2, realD, realR, ii + 0.5)
					) / (intersectLength * intersectHeight);
			}
			// NOTE(review): this barrier sits inside a divergent guard and
			// synchronizes nothing (shared memory is read-only past the first
			// __syncthreads); kept for byte-compatibility -- consider removing.
			__syncthreads();
			proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * constVal;
		}
		else
		{
			summ = 0;
			factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y);
			factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y);
			factU = (curDet.w - cursour.z) / (curDet.y - cursour.y);
			factD = (curDet.z - cursour.z) / (curDet.y - cursour.y);
			constVal = dx * dx * dz / fabsf(dir.y);
#pragma unroll
			for (int jj = 0; jj < YN; ++jj)
			{
				obj = (jj - objCntIdx.y) * dx;
				realL = (obj - curDetL.y) * factL + curDetL.x;
				realR = (obj - curDetR.y) * factR + curDetR.x;
				realU = (obj - curDet.y) * factU + curDet.w;
				realD = (obj - curDet.y) * factD + curDet.z;
				intersectLength = realR - realL;
				intersectHeight = realU - realD;
				realD = realD * invdz + objCntIdx.z + 1;
				realR = realR * invdx + objCntIdx.x + 1;
				realU = realU * invdz + objCntIdx.z + 1;
				realL = realL * invdx + objCntIdx.x + 1;
				summ +=
					(
					tex3D<float>(volTex1, realD, realL, jj + 0.5)
					+ tex3D<float>(volTex1, realU, realR, jj + 0.5)
					- tex3D<float>(volTex1, realU, realL, jj + 0.5)
					- tex3D<float>(volTex1, realD, realR, jj + 0.5)
					) / (intersectLength * intersectHeight);
			}
			// NOTE(review): same as above -- barrier inside divergent guard.
			__syncthreads();
			proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * constVal;
		}
	}
}
// Pseudo distance-driven projection: one thread per (detector row, detector
// column, view) ray, integrating the volume texture along the dominant
// in-plane axis with trilinear fetches. proj is laid out (PN, DNU, DNV).
// FIX: the original called __syncthreads() inside the divergent
// `if (detIdV < DNV && ...)` guard. This kernel uses no shared memory, so
// the barrier served no purpose and was undefined behavior for blocks whose
// threads diverge at the guard; removed.
__global__ void DD3_gpu_proj_pseudodistancedriven_ker(
	cudaTextureObject_t volTex,
	float* proj, float3 s,
	float* d_xds, float* d_yds, float* d_zds,
	float3* cossinT,
	float3 objCntIdx,
	float dx, float dz,
	int XN, int YN,
	int DNU, int DNV, int PN)
{
	int detIdV = threadIdx.x + blockIdx.x * blockDim.x;
	int detIdU = threadIdx.y + blockIdx.y * blockDim.y;
	int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
	if (detIdV < DNV && detIdU < DNU && angIdx < PN)
	{
		// Rotate source and detector cell into the current view frame.
		float3 cossin = cossinT[angIdx];
		float3 cursour = make_float3(
			s.x * cossin.x - s.y * cossin.y,
			s.x * cossin.y + s.y * cossin.x,
			s.z + cossin.z);
		float summ = d_xds[detIdU];
		float obj = d_yds[detIdU];
		float idx = d_zds[detIdV];
		float3 curDet = make_float3(
			summ * cossin.x - obj * cossin.y,
			summ * cossin.y + obj * cossin.x,
			idx + cossin.z);
		float3 dir = normalize(curDet - cursour);
		summ = 0;
		obj = 0;
		float idxZ;
		// March along the dominant in-plane axis; sample the texture at the
		// ray's (other-axis, z) intercept for each slice.
		if (fabsf(cossin.x) <= fabsf(cossin.y))
		{
			summ = 0;
			for (int ii = 0; ii < XN; ++ii)
			{
				obj = (ii - objCntIdx.x) * dx;
				idx = (obj - curDet.x) / dir.x * dir.y + curDet.y;
				idxZ = (obj - curDet.x) / dir.x * dir.z + curDet.z;
				idx = idx / dx + objCntIdx.y + 0.5;
				idxZ = idxZ / dz + objCntIdx.z + 0.5;
				summ += tex3D<float>(volTex, idxZ, ii + 0.5f, idx);
			}
			// Path-length weighting for the slab integration.
			proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * dx / fabsf(dir.x);
		}
		else
		{
			summ = 0;
			for (int jj = 0; jj != YN; ++jj)
			{
				obj = (jj - objCntIdx.y) * dx;
				idx = (obj - curDet.y) / dir.y * dir.x + curDet.x;
				idxZ = (obj - curDet.y) / dir.y * dir.z + curDet.z;
				idx = idx / dx + objCntIdx.x + 0.5;
				idxZ = idxZ / dz + objCntIdx.z + 0.5;
				summ += tex3D<float>(volTex, idxZ, idx, jj + 0.5f);
			}
			proj[(angIdx * DNU + detIdU) * DNV + detIdV] = summ * dx / fabsf(dir.y);
		}
	}
}
////Use the split-collect method to do the projection
//void DD3ProjHelical_3GPU(
// float x0, float y0, float z0,
// int DNU, int DNV,
// float* xds, float* yds, float* zds,
// float imgXCenter, float imgYCenter, float imgZCenter,
// float* hangs, float* hzPos, int PN,
// int XN, int YN, int ZN,
// float* hvol, float* hprj,
// float dx, float dz,
// byte* mask, int methodId, int (&startPN)[3])
//{
//
//}
// Divide three sub volumes.
// Split the volume into subVolN z-slabs (one per GPU), one slab per
// projection-index range [PrjIdx_Start[i], PrjIdx_End[i]), filling the
// caller-provided output arrays: slab slice ranges, slice counts, the slab
// data itself, and each slab's z center.
// NOTE(review): the nullptr checks below only rebind the LOCAL parameter
// copies -- a caller that passes nullptr never sees these allocations (and
// they leak). All output arrays must be pre-allocated by the caller; the
// checks are kept for compatibility only.
// BUG FIX: the fallback allocations used `new float*[...]` / `new float[...]`
// even though this is a template over T.
template<typename T>
void GenSubVols(
	int* ObjIdx_Start,
	int* ObjIdx_End,
	int* SZN,
	T** subVol,
	T* subImgZCenter,
	const int subVolN,
	const int* PrjIdx_Start,
	const int* PrjIdx_End,
	const T detCntIdxV,
	const T detStpZ,
	const T objCntIdxZ,
	const T dz,
	const int ZN,
	const int DNV,
	const T imgZCenter,
	const int PN,
	const int XN,
	const int YN,
	const T* hvol,
	const T* hzPos)
{
	if (nullptr == ObjIdx_Start)
	{
		ObjIdx_Start = new int[subVolN];
	}
	if(nullptr == ObjIdx_End)
	{
		ObjIdx_End = new int[subVolN];
	}
	if(nullptr == SZN)
	{
		SZN = new int[subVolN];
	}
	if(nullptr == subVol)
	{
		subVol = new T*[subVolN];
	}
	if(nullptr == subImgZCenter)
	{
		subImgZCenter = new T[subVolN];
	}
	thrust::host_vector<T> h_zPos(hzPos, hzPos + PN);
	// One OpenMP thread per sub volume.
	omp_set_num_threads(subVolN);
#pragma omp parallel for
	for(int i = 0; i < subVolN; ++i) //The last one has problem!!!!!!!!!!
	{
		// Slice range this GPU's projection views can see.
		getVolZIdxPair<T>(h_zPos,PrjIdx_Start[i],PrjIdx_End[i],
			detCntIdxV, detStpZ, DNV, objCntIdxZ, dz, ZN, ObjIdx_Start[i], ObjIdx_End[i]);
		std::cout<<i<<" "<<ObjIdx_Start[i]<<" "<<ObjIdx_End[i]<<"\n";
		SZN[i] = ObjIdx_End[i] - ObjIdx_Start[i];
		subVol[i] = new T[XN * YN * SZN[i]];
		//Divide the volume
		getSubVolume<T>(hvol, XN * YN, ZN, ObjIdx_Start[i], ObjIdx_End[i], subVol[i]);
		//Calculate the corresponding center position of the slab in physical z.
		subImgZCenter[i] = ((ObjIdx_End[i] + ObjIdx_Start[i] - (ZN - 1.0)) * dz + imgZCenter * 2.0) / 2.0;
	}
}
// Release all arrays allocated for the per-GPU sub-volume bookkeeping:
// each slab buffer, the slab pointer table, and the index/center arrays.
template<typename T>
void DelSubVols(
	int* ObjIdx_Start,
	int* ObjIdx_End,
	int* SZN,
	T** subVol,
	T* subImgZCenter, const int subVolN)
{
	for (int v = 0; v < subVolN; ++v)
	{
		delete[] subVol[v];
	}
	delete[] subVol;
	delete[] subImgZCenter;
	delete[] ObjIdx_Start;
	delete[] ObjIdx_End;
	delete[] SZN;
}
// Multi-GPU pseudo distance-driven forward projection (host driver).
// The PN views are split among gpuNum GPUs at the view indices in startPN;
// each GPU receives its view sub-range plus the z-slab of the volume those
// views can see, projects it, and the per-GPU sinograms are copied back into
// hprj contiguously. hvol is (ZN,XN,YN) z-fastest; hprj is (PN,DNU,DNV);
// mask is an XN*YN in-plane mask applied to every slice (hvol is modified
// in place).
void DD3_gpu_proj_pseudodistancedriven_multiGPU(
	float x0, float y0, float z0,
	int DNU, int DNV,
	float* xds, float* yds, float* zds,
	float imgXCenter, float imgYCenter, float imgZCenter,
	float* h_angs, float* h_zPos, int PN,
	int XN, int YN, int ZN,
	float* hvol, float* hprj,
	float dx, float dz,
	byte* mask,const int* startPN, int gpuNum)
{
	thrust::host_vector<float> hangs(h_angs, h_angs + PN);
	thrust::host_vector<float> hzPos(h_zPos, h_zPos + PN);
	// Mask the volume (NOTE: clobbers the caller's hvol in place).
	for (int i = 0; i != XN * YN; ++i)
	{
		byte v = mask[i];
		for (int z = 0; z != ZN; ++z)
		{
			hvol[i * ZN + z] = hvol[i * ZN + z] * v;
		}
	}
	// Calculate the (fractional) voxel index of the object center.
	const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
	const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
	const float objCntIdxZ = (ZN - 1.0) * 0.5 - imgZCenter / dz;
	// Divide the volume into sub volumes with overlaps according to the startPN:
	// view chunk i is [PrjIdx_Start[i], PrjIdx_End[i]).
	std::vector<int> ObjIdx_Start(gpuNum, -1);
	std::vector<int> ObjIdx_End(gpuNum, -1);
	std::vector<int> PrjIdx_Start(startPN, startPN+gpuNum);
	std::vector<int> PrjIdx_End(gpuNum, 0);
	std::copy(PrjIdx_Start.begin()+1, PrjIdx_Start.end(), PrjIdx_End.begin());
	PrjIdx_End[gpuNum - 1] = PN;
	std::vector<int> SPN = PrjIdx_End - PrjIdx_Start; // views per GPU
	// Exclusive prefix sum of SPN: byte offset (in views) of each GPU's
	// output chunk inside hprj.
	std::vector<int> prefixSPN = SPN;
	thrust::exclusive_scan(prefixSPN.begin(), prefixSPN.end(), prefixSPN.begin());
	//std::cout<<"prefixSPN are "<<prefixSPN[0]<<" "<<prefixSPN[1]<<" "<<prefixSPN[2]<<"\n";
	std::vector<int> SZN(gpuNum, 0); // The slices number of each sub volume
	const float detStpZ = (zds[DNV-1] - zds[0]) / (DNV - 1); // detector cell height
	const float detCntIdxV = -zds[0] / detStpZ; // Detector center along Z direction
	std::vector<std::vector<float> > subVol(gpuNum); // Used to store three sub volumes
	std::vector<float> subImgZCenter(gpuNum, 0); // the center of three sub volumes
	// Generate multiple streams (one per GPU);
	std::vector<cudaStream_t> stream(gpuNum);
	std::vector<int> siz(gpuNum, 0);
	std::vector<cudaExtent> volumeSize(gpuNum);
	cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
	std::vector<cudaArray*> d_volumeArray(gpuNum);
	// Per-GPU device buffers (host_vector of device_vectors).
	thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > d_zds(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > angs(gpuNum);
	thrust::host_vector<thrust::device_vector<float> > zPos(gpuNum);
	thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
	dim3 blk(64, 16, 1);
	std::vector<dim3> gid(gpuNum);
	std::vector<cudaTextureObject_t> texObj(gpuNum);
	// Per-GPU setup: one OpenMP thread drives each device.
	omp_set_num_threads(gpuNum);
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		// Which z slices this GPU's views can see.
		getVolZIdxPair<float>(hzPos, PrjIdx_Start[i], PrjIdx_End[i],
			detCntIdxV, detStpZ, DNV, objCntIdxZ, dz, ZN, ObjIdx_Start[i],
			ObjIdx_End[i]);
		//std::cout<<i<<" "<<ObjIdx_Start[i]<<" "<<ObjIdx_End[i]<<"\n";
		SZN[i] = ObjIdx_End[i] - ObjIdx_Start[i];
		subVol[i].resize(XN * YN * SZN[i]);
		// Divide the volume into multiple sets
		getSubVolume<float>(hvol, XN * YN, ZN, ObjIdx_Start[i], ObjIdx_End[i], &(subVol[i][0]));
		// Fractional z center index of the slab, passed to the kernel as
		// objCntIdx.z (offset by the slab's start slice).
		subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjIdx_Start[i] - 0.5f;
		CUDA_SAFE_CALL(cudaSetDevice(i));
		// One stream per GPU.
		CUDA_SAFE_CALL(cudaStreamCreate(&stream[i]));
		siz[i] = XN * YN * SZN[i];
		d_vol[i].resize(siz[i]);
		d_vol[i] = subVol[i];
		subVol[i].clear();
		// Stage the slab into a 3D cudaArray (width = Z, height = X, depth = Y)
		// backing the texture object used by the kernel.
		volumeSize[i].width = SZN[i];
		volumeSize[i].height = XN;
		volumeSize[i].depth = YN;
		CUDA_SAFE_CALL(cudaMalloc3DArray(&d_volumeArray[i], &channelDesc, volumeSize[i]));
		cudaMemcpy3DParms copyParams = { 0 };
		copyParams.srcPtr = make_cudaPitchedPtr((void*)
			thrust::raw_pointer_cast(&d_vol[i][0]),
			volumeSize[i].width * sizeof(float),
			volumeSize[i].width, volumeSize[i].height);
		copyParams.dstArray = d_volumeArray[i];
		copyParams.extent = volumeSize[i];
		copyParams.kind = cudaMemcpyDeviceToDevice;
		CUDA_SAFE_CALL(cudaMemcpy3DAsync(&copyParams,stream[i]));
		// NOTE(review): clear() only sets size to 0; the device allocation is
		// retained until d_vol is destroyed, so the async copy source stays
		// valid -- verify if this is ever changed to shrink_to_fit/swap.
		d_vol[i].clear();
		cudaResourceDesc resDesc;
		memset(&resDesc, 0, sizeof(resDesc));
		resDesc.resType = cudaResourceTypeArray;
		resDesc.res.array.array = d_volumeArray[i];
		cudaTextureDesc texDesc;
		memset(&texDesc, 0, sizeof(texDesc));
		texDesc.addressMode[0] = cudaAddressModeBorder;
		texDesc.addressMode[1] = cudaAddressModeBorder;
		texDesc.addressMode[2] = cudaAddressModeBorder;
		texDesc.filterMode = cudaFilterModeLinear;
		texDesc.readMode = cudaReadModeElementType;
		texDesc.normalizedCoords = false;
		texObj[i] = 0;
		CUDA_SAFE_CALL(cudaCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr));
		// Output sinogram chunk for this GPU's SPN[i] views.
		prj[i].resize(DNU * DNV * SPN[i]); // Change here
		d_xds[i].resize(DNU);
		d_yds[i].resize(DNU);
		d_zds[i].resize(DNV);
		thrust::copy(xds,xds+DNU,d_xds[i].begin());
		thrust::copy(yds,yds+DNU,d_yds[i].begin());
		thrust::copy(zds,zds+DNV,d_zds[i].begin());
		// Per-view (angle, source-z) data for this chunk only.
		angs[i].resize(SPN[i]);
		zPos[i].resize(SPN[i]);
		thrust::copy(hangs.begin() + PrjIdx_Start[i],
			hangs.begin() + PrjIdx_Start[i] + SPN[i],
			angs[i].begin());
		thrust::copy(hzPos.begin() + PrjIdx_Start[i],
			hzPos.begin() + PrjIdx_Start[i] + SPN[i],
			zPos[i].begin());
		// NOTE(review): sized PN although only the first SPN[i] entries are
		// written/used by the kernel -- presumably oversized; verify.
		cossinZT[i].resize(PN);
		thrust::transform(
			thrust::make_zip_iterator(thrust::make_tuple(angs[i].begin(), zPos[i].begin())),
			thrust::make_zip_iterator(thrust::make_tuple(angs[i].end(), zPos[i].end())),
			cossinZT[i].begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
		angs[i].clear();
		zPos[i].clear();
		gid[i].x = (DNV + blk.x - 1) / blk.x;
		gid[i].y = (DNU + blk.y - 1) / blk.y;
		gid[i].z = (SPN[i] + blk.z - 1) / blk.z;
	}
	// Launch the projection kernel on each GPU's stream.
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		cudaSetDevice(i);
		DD3_gpu_proj_pseudodistancedriven_ker<< <gid[i], blk, 0, stream[i]>> >(
			texObj[i], thrust::raw_pointer_cast(&prj[i][0]),
			make_float3(x0, y0, z0),
			thrust::raw_pointer_cast(&d_xds[i][0]),
			thrust::raw_pointer_cast(&d_yds[i][0]),
			thrust::raw_pointer_cast(&d_zds[i][0]),
			thrust::raw_pointer_cast(&cossinZT[i][0]),
			make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]),
			dx, dz, XN, YN, DNU, DNV, SPN[i]);
	}
	// NOTE(review): a bare `#pragma omp barrier` outside a parallel region has
	// no effect; the preceding parallel-for already joins its threads.
#pragma omp barrier
	// Copy each GPU's sinogram chunk back to its offset in hprj and release
	// the device resources.
#pragma omp parallel for
	for(int i = 0; i < gpuNum; ++i)
	{
		cudaSetDevice(i);
		CUDA_SAFE_CALL(cudaMemcpyAsync(hprj + DNU * DNV * prefixSPN[i],
			thrust::raw_pointer_cast(&prj[i][0]), sizeof(float) * DNU * DNV * SPN[i],
			cudaMemcpyDeviceToHost,stream[i]));
		d_xds[i].clear();
		d_yds[i].clear();
		d_zds[i].clear();
		cossinZT[i].clear();
		prj[i].clear();
		CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj[i]));
		CUDA_SAFE_CALL(cudaFreeArray(d_volumeArray[i]));
		//CUDA_SAFE_CALL(cudaStreamDestroy(stream[i*2]));
		//CUDA_SAFE_CALL(cudaStreamDestroy(stream[i*2 + 1]));
	}
	// Delete the vectors;
	hangs.clear();
	hzPos.clear();
	ObjIdx_Start.clear();
	ObjIdx_End.clear();
	PrjIdx_Start.clear();
	PrjIdx_End.clear();
	SPN.clear();
	prefixSPN.clear();
	SZN.clear();
	subVol.clear();
	subImgZCenter.clear();
	stream.clear();
	siz.clear();
	volumeSize.clear();
	d_volumeArray.clear();
	d_vol.clear();
	prj.clear();
	d_xds.clear();
	d_yds.clear();
	d_zds.clear();
	angs.clear();
	zPos.clear();
	cossinZT.clear();
	gid.clear();
}
// Multi-GPU branchless distance-driven (DD) forward projection using 2D
// summed-area tables (SAT).
// The PN views are partitioned among gpuNum GPUs by startPN; each GPU gets
// only the Z-slab of the volume its views can intersect, builds two padded
// SAT volumes (slice orders ZXY and ZYX), binds them to 3D textures, and
// runs the branchless DD projection kernel. Each GPU's result is copied
// back into hprj at its view offset (prefixSPN).
//
// Parameters:
//   (x0, y0, z0)          initial X-ray source position
//   DNU, DNV              detector channel / row counts
//   xds, yds, zds         detector cell center coordinates
//   imgX/Y/ZCenter        physical center of the volume
//   h_angs, h_zPos        per-view rotation angle and source z (PN entries)
//   XN, YN, ZN            volume dimensions; hvol ordered (x*YN+y)*ZN+z
//   hvol (in), hprj (out) host volume / projection buffers
//   dx, dz                transverse / axial voxel sizes
//   mask                  XN*YN in-plane mask multiplied into the volume
//   startPN               first view index per GPU (gpuNum entries)
void DD3_gpu_proj_branchless_sat2d_multiGPU(
float x0, float y0, float z0,
int DNU, int DNV,
float* xds, float* yds, float* zds,
float imgXCenter, float imgYCenter, float imgZCenter,
float* h_angs, float* h_zPos, int PN,
int XN, int YN, int ZN,
float* hvol, float* hprj,
float dx, float dz,
byte* mask,const int* startPN, int gpuNum)
{
thrust::host_vector<float> hangs(h_angs, h_angs+PN);
thrust::host_vector<float> hzPos(h_zPos, h_zPos+PN);
// Apply the in-plane mask to every z-column of the host volume in place.
for (int i = 0; i != XN * YN; ++i)
{
byte v = mask[i];
for (int z = 0; z != ZN; ++z)
{
hvol[i * ZN + z] = hvol[i * ZN + z] * v;
}
}
// Calculate the boundary positions
std::vector<float> bxds(DNU + 1, 0.0f);
std::vector<float> byds(DNU + 1, 0.0f);
std::vector<float> bzds(DNV + 1, 0.0f);
DD3Boundaries<float>(DNU + 1, xds, bxds);
DD3Boundaries<float>(DNU + 1, yds, byds);
DD3Boundaries<float>(DNV + 1, zds, bzds);
// Fractional voxel indices of the volume center.
const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
const float objCntIdxZ = (ZN - 1.0) * 0.5 - imgZCenter / dz;
// Divide the volume into sub volumes with overlaps according to the startPN
std::vector<int> ObjIdx_Start(gpuNum, -1);
std::vector<int> ObjIdx_End(gpuNum, -1);
std::vector<int> PrjIdx_Start(startPN, startPN+gpuNum);
std::vector<int> PrjIdx_End(gpuNum, 0);
std::copy(PrjIdx_Start.begin()+1, PrjIdx_Start.end(), PrjIdx_End.begin());
PrjIdx_End[gpuNum - 1] = PN;
// Per-GPU view counts and their exclusive prefix sums (offsets into hprj).
// NOTE(review): element-wise vector operator- is a project-provided helper.
std::vector<int> SPN = PrjIdx_End - PrjIdx_Start;
std::vector<int> prefixSPN = SPN;
thrust::exclusive_scan(prefixSPN.begin(), prefixSPN.end(), prefixSPN.begin());
//std::cout<<"prefixSPN are "<<prefixSPN[0]<<" "<<prefixSPN[1]<<" "<<prefixSPN[2]<<"\n";
std::vector<int> SZN(gpuNum, 0); // The slices number of each sub volume
const float detStpZ = (zds[DNV-1] - zds[0]) / (DNV - 1); // detector cell height
const float detCntIdxV = -zds[0] / detStpZ; // Detector center along Z direction
std::vector<std::vector<float> > subVol(gpuNum); // Used to store three sub volumes
std::vector<float> subImgZCenter(gpuNum, 0); // the center of three sub volumes
// Generate multiple streams; two streams per GPU so the ZXY and ZYX SAT
// pipelines can proceed concurrently.
std::vector<cudaStream_t> stream(gpuNum * 2);
std::vector<int> siz(gpuNum, 0);
std::vector<int> nsiz_ZXY(gpuNum, 0);
std::vector<int> nsiz_ZYX(gpuNum, 0);
std::vector<int> nZN(gpuNum,0);
// Padded dimensions (+1) required by the summed-area tables.
const int nXN = XN + 1;
const int nYN = YN + 1;
thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_ZXY(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_ZYX(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prj(gpuNum); // Change here
thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_zds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_bxds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_byds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_bzds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > angs(gpuNum);
thrust::host_vector<thrust::device_vector<float> > zPos(gpuNum);
thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
// Copy to three volumes
dim3 copyblk(64, 16, 1);
std::vector<dim3> copygid(gpuNum);
dim3 satblk1(32,1,1);
dim3 satblk2(64,16,1);
dim3 satgid1_1((nXN * YN + satblk1.x - 1) / satblk1.x, 1, 1);
dim3 satgid1_2((nYN * XN + satblk1.x - 1) / satblk1.x, 1, 1);
std::vector<dim3> satgid2_1(gpuNum);
std::vector<dim3> satgid2_2(gpuNum);
dim3 blk(BLKX, BLKY, BLKZ);
std::vector<dim3> gid(gpuNum);
std::vector<cudaExtent> volumeSize1(gpuNum);
std::vector<cudaExtent> volumeSize2(gpuNum);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
std::vector<cudaArray*> d_volumeArray1(gpuNum);
std::vector<cudaArray*> d_volumeArray2(gpuNum);
std::vector<cudaTextureObject_t> texObj1(gpuNum);
std::vector<cudaTextureObject_t> texObj2(gpuNum);
// One OpenMP thread per GPU: each thread prepares its device's data.
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
// Determine which z-slices [ObjIdx_Start, ObjIdx_End) this GPU's views
// can intersect.
getVolZIdxPair<float>(hzPos, PrjIdx_Start[i], PrjIdx_End[i],
detCntIdxV, detStpZ, DNV, objCntIdxZ, dz, ZN, ObjIdx_Start[i],
ObjIdx_End[i]);
SZN[i] = ObjIdx_End[i] - ObjIdx_Start[i];
subVol[i].resize(XN * YN * SZN[i]);
// Divide the volume into multiple sets
getSubVolume<float>(hvol, XN * YN, ZN, ObjIdx_Start[i], ObjIdx_End[i], &(subVol[i][0]));
// NOTE: How it comes
// We need to calculate the (ii - subImgZCenter[i]) * dz to define the
// real physical position of the voxel.
// Assume that the physical center of the whole volume is imgZCenter
// The minimum lower position of the volume is imgZCenter - dz * N / 2;
// Then the corresponding physical lower boundary position of ObjIdx_Start[i]
// is --> imgZCenter - dz * N / 2 + ObjIdx_Start[i] * dz
// while the corresponding physical center position of layer ObjIdx_Start[i]
// is --> imgZCenter - dz * N / 2 + ObjIdx_Start[i] * dz + 0.5 * dz
// We need when ii==0 --> (ii - subImgZCenter[i]) * dz = imgZCenter - dz * N / 2 + ObjIdx_Start[i] * dz + 0.5 * dz
// It means subImgZCenter[i] = -imgZCenter / dz + N / 2 - ObjIdx_Start[i] - 0.5;
subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjIdx_Start[i] - 0.5f;
CUDA_SAFE_CALL(cudaSetDevice(i));
// For each GPU generate two streams
CUDA_SAFE_CALL(cudaStreamCreate(&stream[i * 2]));
CUDA_SAFE_CALL(cudaStreamCreate(&stream[i * 2 + 1]));
siz[i] = XN * YN * SZN[i];
nZN[i] = SZN[i] + 1;
nsiz_ZXY[i] = nZN[i] * nXN * YN;
nsiz_ZYX[i] = nZN[i] * nYN * XN;
d_ZXY[i].resize(nsiz_ZXY[i]);
d_ZYX[i].resize(nsiz_ZYX[i]);
d_vol[i].resize(siz[i]);
d_vol[i] = subVol[i];
subVol[i].clear();
// Split the sub volume into two zero-padded copies (ZXY and ZYX order)
// that will become the two SATs.
copygid[i].x = (SZN[i] + copyblk.x - 1) / copyblk.x;
copygid[i].y = (XN + copyblk.y - 1) / copyblk.y;
copygid[i].z = (YN + copyblk.z - 1) / copyblk.z;
naive_copyToTwoVolumes << <copygid[i], copyblk, 0, stream[2 * i] >> >(
thrust::raw_pointer_cast(&d_vol[i][0]),
thrust::raw_pointer_cast(&d_ZXY[i][0]),
thrust::raw_pointer_cast(&d_ZYX[i][0]),
XN,YN,SZN[i]);
CUDA_SAFE_CALL(cudaStreamSynchronize(stream[2 * i]));
CUDA_SAFE_CALL(cudaStreamSynchronize(stream[2 * i + 1]));
d_vol[i].clear();
// Generate the SAT for two volumes
satgid2_1[i].x = (nZN[i] + satblk2.x - 1) / satblk2.x;
satgid2_1[i].y = (YN + satblk2.y - 1) / satblk2.y;
satgid2_1[i].z = 1;
satgid2_2[i].x = (nZN[i] + satblk2.x - 1) / satblk2.x;
satgid2_2[i].y = (XN + satblk2.y - 1) / satblk2.y;
satgid2_2[i].z = 1;
verticalIntegral << <satgid1_1, satblk1, 0, stream[2 * i] >> >(
thrust::raw_pointer_cast(&d_ZXY[i][0]), nZN[i], nXN * YN);
horizontalIntegral << <satgid2_1[i], satblk2, 0, stream[2 * i] >> >(
thrust::raw_pointer_cast(&d_ZXY[i][0]), nXN, nZN[i], YN);
verticalIntegral << <satgid1_2, satblk1, 0, stream[2 * i + 1] >> >(
thrust::raw_pointer_cast(&d_ZYX[i][0]), nZN[i], nYN * XN);
horizontalIntegral << <satgid2_2[i], satblk2, 0, stream[2 * i + 1] >> >(
thrust::raw_pointer_cast(&d_ZYX[i][0]), nYN, nZN[i], XN);
//Bind to the texture;
volumeSize1[i].width = nZN[i];
volumeSize1[i].height = nXN;
volumeSize1[i].depth = YN;
volumeSize2[i].width = nZN[i];
volumeSize2[i].height = nYN;
volumeSize2[i].depth = XN;
CUDA_SAFE_CALL(cudaMalloc3DArray(&d_volumeArray1[i], &channelDesc, volumeSize1[i]));
CUDA_SAFE_CALL(cudaMalloc3DArray(&d_volumeArray2[i], &channelDesc, volumeSize2[i]));
cudaMemcpy3DParms copyParams1 = { 0 };
copyParams1.srcPtr = make_cudaPitchedPtr((void*)
thrust::raw_pointer_cast(&d_ZXY[i][0]),
volumeSize1[i].width * sizeof(float),
volumeSize1[i].width, volumeSize1[i].height);
copyParams1.dstArray = d_volumeArray1[i];
copyParams1.extent = volumeSize1[i];
copyParams1.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3DParms copyParams2 = { 0 };
copyParams2.srcPtr = make_cudaPitchedPtr((void*)
thrust::raw_pointer_cast(&d_ZYX[i][0]),
volumeSize2[i].width * sizeof(float),
volumeSize2[i].width, volumeSize2[i].height);
copyParams2.dstArray = d_volumeArray2[i];
copyParams2.extent = volumeSize2[i];
copyParams2.kind = cudaMemcpyDeviceToDevice;
CUDA_SAFE_CALL(cudaMemcpy3DAsync(&copyParams1,stream[2 * i]));
CUDA_SAFE_CALL(cudaMemcpy3DAsync(&copyParams2,stream[2 * i + 1]));
d_ZXY[i].clear();
d_ZYX[i].clear();
// Wrap each SAT cudaArray in a clamped, linearly-interpolated texture.
cudaResourceDesc resDesc1;
cudaResourceDesc resDesc2;
memset(&resDesc1, 0, sizeof(resDesc1));
memset(&resDesc2, 0, sizeof(resDesc2));
resDesc1.resType = cudaResourceTypeArray;
resDesc2.resType = cudaResourceTypeArray;
resDesc1.res.array.array = d_volumeArray1[i];
resDesc2.res.array.array = d_volumeArray2[i];
cudaTextureDesc texDesc1;
cudaTextureDesc texDesc2;
memset(&texDesc1, 0, sizeof(texDesc1));
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc1.addressMode[0] = cudaAddressModeClamp;
texDesc1.addressMode[1] = cudaAddressModeClamp;
texDesc1.addressMode[2] = cudaAddressModeClamp;
texDesc2.addressMode[0] = cudaAddressModeClamp;
texDesc2.addressMode[1] = cudaAddressModeClamp;
texDesc2.addressMode[2] = cudaAddressModeClamp;
texDesc1.filterMode = cudaFilterModeLinear;
texDesc2.filterMode = cudaFilterModeLinear;
texDesc1.readMode = cudaReadModeElementType;
texDesc2.readMode = cudaReadModeElementType;
texDesc1.normalizedCoords = false;
texDesc2.normalizedCoords = false;
texObj1[i] = 0;
texObj2[i] = 0;
CUDA_SAFE_CALL(cudaCreateTextureObject(&texObj1[i], &resDesc1, &texDesc1, nullptr));
CUDA_SAFE_CALL(cudaCreateTextureObject(&texObj2[i], &resDesc2, &texDesc2, nullptr));
prj[i].resize(DNU * DNV * SPN[i]); // Change here
d_xds[i].resize(DNU);
d_yds[i].resize(DNU);
d_zds[i].resize(DNV);
thrust::copy(xds,xds+DNU,d_xds[i].begin());
thrust::copy(yds,yds+DNU,d_yds[i].begin());
thrust::copy(zds,zds+DNV,d_zds[i].begin());
d_bxds[i].resize(bxds.size());
d_bxds[i] = bxds;
d_byds[i].resize(byds.size());
d_byds[i] = byds;
d_bzds[i].resize(bzds.size());
d_bzds[i] = bzds;
angs[i].resize(SPN[i]);
zPos[i].resize(SPN[i]);
thrust::copy(hangs.begin() + PrjIdx_Start[i],
hangs.begin() + PrjIdx_Start[i] + SPN[i],
angs[i].begin());
thrust::copy(hzPos.begin() + PrjIdx_Start[i],
hzPos.begin() + PrjIdx_Start[i] + SPN[i],
zPos[i].begin());
// NOTE(review): sized PN but only SPN[i] entries are written by the
// transform below and only SPN[i] are read by the kernel; resize(SPN[i])
// would suffice — verify before changing.
cossinZT[i].resize(PN);
// Precompute per-view (cos, sin, z-offset) triples on the device.
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(angs[i].begin(), zPos[i].begin())),
thrust::make_zip_iterator(thrust::make_tuple(angs[i].end(), zPos[i].end())),
cossinZT[i].begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
angs[i].clear();
zPos[i].clear();
gid[i].x = (DNV + blk.x - 1) / blk.x;
gid[i].y = (DNU + blk.y - 1) / blk.y;
gid[i].z = (SPN[i] + blk.z - 1) / blk.z;
}
// Launch the branchless DD projection kernel on every GPU concurrently.
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
DD3_gpu_proj_branchless_sat2d_ker << <gid[i], blk, 0, stream[i * 2]>> >(
texObj1[i], texObj2[i],
thrust::raw_pointer_cast(&prj[i][0]),
make_float3(x0, y0, z0),
thrust::raw_pointer_cast(&cossinZT[i][0]),
thrust::raw_pointer_cast(&d_xds[i][0]),
thrust::raw_pointer_cast(&d_yds[i][0]),
thrust::raw_pointer_cast(&d_zds[i][0]),
thrust::raw_pointer_cast(&d_bxds[i][0]),
thrust::raw_pointer_cast(&d_byds[i][0]),
thrust::raw_pointer_cast(&d_bzds[i][0]),
make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]),
dx, dz, XN, YN, ZN, DNU, DNV, SPN[i]);
}
// NOTE(review): a standalone omp barrier outside a parallel region appears
// to have no effect; the preceding parallel-for already joins its threads.
#pragma omp barrier
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
// Copy this GPU's views back to the host at its prefixSPN offset.
// NOTE(review): no explicit stream/device synchronize precedes the texture
// destruction below or the function return; with pageable hprj this relies
// on cudaMemcpyAsync's implicit host-side synchronization — verify.
CUDA_SAFE_CALL(cudaMemcpyAsync(hprj + DNU * DNV * prefixSPN[i],
thrust::raw_pointer_cast(&prj[i][0]), sizeof(float) * DNU * DNV * SPN[i],
cudaMemcpyDeviceToHost,stream[2*i]));
d_xds[i].clear();
d_yds[i].clear();
d_zds[i].clear();
d_bxds[i].clear();
d_byds[i].clear();
d_bzds[i].clear();
cossinZT[i].clear();
prj[i].clear();
CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj1[i]));
CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj2[i]));
CUDA_SAFE_CALL(cudaFreeArray(d_volumeArray1[i]));
CUDA_SAFE_CALL(cudaFreeArray(d_volumeArray2[i]));
}
// Clear the vectors
// NOTE(review): the CUDA streams are never destroyed (clear() only empties
// the vector) — a per-call resource leak.
hangs.clear();
hzPos.clear();
bxds.clear();
byds.clear();
bzds.clear();
ObjIdx_Start.clear();
ObjIdx_End.clear();
PrjIdx_Start.clear();
PrjIdx_End.clear();
SPN.clear();
prefixSPN.clear();
SZN.clear();
subVol.clear();
subImgZCenter.clear();
stream.clear();
siz.clear();
nsiz_ZXY.clear();
nsiz_ZYX.clear();
nZN.clear();
d_vol.clear();
d_ZXY.clear();
d_ZYX.clear();
prj.clear();
d_xds.clear();
d_yds.clear();
d_zds.clear();
d_bxds.clear();
d_byds.clear();
d_bzds.clear();
angs.clear();
zPos.clear();
cossinZT.clear();
copygid.clear();
satgid2_1.clear();
satgid2_2.clear();
gid.clear();
volumeSize1.clear();
volumeSize2.clear();
d_volumeArray1.clear();
d_volumeArray2.clear();
}
extern "C"
void DD3Proj_multiGPU(
float x0, float y0, float z0,
int DNU, int DNV,
float* xds, float* yds, float* zds,
float imgXCenter, float imgYCenter, float imgZCenter,
float* hangs, float* hzPos, int PN,
int XN, int YN, int ZN,
float* hvol, float* hprj,
float dx, float dz,
byte* mask, int prjMode, const int* startPN, int gpuNum)
{
	// Entry point dispatching to one of the multi-GPU forward projectors:
	// prjMode == 0 selects the branchless DD (SAT-based) model; any other
	// value selects the pseudo distance-driven model.
	if (prjMode == 0)
	{
		DD3_gpu_proj_branchless_sat2d_multiGPU(x0, y0, z0, DNU, DNV,
			xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj, dx, dz,
			mask, startPN, gpuNum);
	}
	else
	{
		DD3_gpu_proj_pseudodistancedriven_multiGPU(x0, y0, z0, DNU, DNV,
			xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj, dx, dz,
			mask, startPN, gpuNum);
	}
}
// Back-projection algorithm selector used to pick a DD3_gpu_back_ker specialization.
enum BackProjectionMethod{ _BRANCHLESS, _PSEUDODD, _ZLINEBRANCHLESS, _VOLUMERENDERING };
#ifndef CALDETPARAS
#define CALDETPARAS
// Computes detector geometry parameters from the detector cell centers and
// the source position (x0, y0, z0). Returned float4:
//   .x = detCtrIdxU : fractional channel index of the detector center
//   .y = detCtrIdxV : fractional row index of the source plane
//   .z = dbeta      : angular pitch between adjacent channels (radians)
//   .w = ddv        : detector cell height along z
// DNU/DNV are the channel/row counts; xds/yds/zds hold DNU (resp. DNV) cell
// center coordinates.
float4 calDetParas(float* xds, float* yds, float* zds, float x0, float y0, float z0, int DNU, int DNV)
{
	// RAII buffers replace the original raw new[]/delete[]: leak-safe on any
	// early exit, identical numerics.
	std::vector<float> bxds(DNU + 1);
	std::vector<float> byds(DNU + 1);
	std::vector<float> bzds(DNV + 1);
	DD3Boundaries(DNU + 1, xds, bxds.data());
	DD3Boundaries(DNU + 1, yds, byds.data());
	DD3Boundaries(DNV + 1, zds, bzds.data());
	// Detector cell height and the fractional row index of the source plane.
	float ddv = (bzds[DNV] - bzds[0]) / DNV;
	float detCtrIdxV = (-(bzds[0] - z0) / ddv) - 0.5f;
	// Unit directions from the source to the iso-center and to the two
	// outermost channel boundaries; 2D cross products give the signed angles.
	float2 dir = normalize(make_float2(-x0, -y0));
	float2 dirL = normalize(make_float2(bxds[0] - x0, byds[0] - y0));
	float2 dirR = normalize(make_float2(bxds[DNU] - x0, byds[DNU] - y0));
	float dbeta = asinf(dirL.x * dirR.y - dirL.y * dirR.x) / DNU;
	float minBeta = asinf(dir.x * dirL.y - dir.y * dirL.x);
	float detCtrIdxU = -minBeta / dbeta - 0.5f;
	return make_float4(detCtrIdxU, detCtrIdxV, dbeta, ddv);
}
#endif
// Pads each DNU x DNV projection view with a one-pixel zero border on all
// sides: the output views are (DNU+2) x (DNV+2), input copied to the interior.
// Launch: 3D grid covering (DNV, DNU, PN); prjOut must be pre-zeroed.
__global__ void addTwoSidedZeroBoarder(float* prjIn, float* prjOut,
const int DNU, const int DNV, const int PN)
{
	const int v = blockDim.x * blockIdx.x + threadIdx.x;
	const int u = blockDim.y * blockIdx.y + threadIdx.y;
	const int p = blockDim.z * blockIdx.z + threadIdx.z;
	if (u >= DNU || v >= DNV || p >= PN)
		return;
	const int src = (p * DNU + u) * DNV + v;
	const int dst = (p * (DNU + 2) + (u + 1)) * (DNV + 2) + (v + 1);
	prjOut[dst] = prjIn[src];
}
// Pads each DNU x DNV projection view with a one-pixel zero border at the
// low-U/low-V edges only: output views are (DNU+1) x (DNV+1). This is the
// zero row/column a summed-area table needs. prj_out must be pre-zeroed.
__global__ void addOneSidedZeroBoarder(const float* prj_in, float* prj_out, int DNU, int DNV, int PN)
{
	const int v = blockDim.x * blockIdx.x + threadIdx.x;
	const int u = blockDim.y * blockIdx.y + threadIdx.y;
	const int p = blockDim.z * blockIdx.z + threadIdx.z;
	if (u >= DNU || v >= DNV || p >= PN)
		return;
	const int src = (p * DNU + u) * DNV + v;
	const int dst = (p * (DNU + 1) + (u + 1)) * (DNV + 1) + (v + 1);
	prj_out[dst] = prj_in[src];
}
// In-place inclusive prefix sum along the fastest dimension: each of the N
// contiguous columns of length ZN is scanned by one thread sequentially.
__global__ void verticalIntegral2(float* prj, int ZN, int N)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (col >= N)
		return;
	float* base = prj + col * ZN;
	float acc = base[0];
	for (int z = 1; z < ZN; ++z)
	{
		acc += base[z];
		base[z] = acc;
	}
}
// In-place inclusive prefix sum along the detector-U direction (stride DNV)
// for every (v, view) pair; one thread scans one strided row sequentially.
// NOTE: the misspelled name ("heorizontal") is kept for existing call sites.
__global__ void heorizontalIntegral2(float* prj, int DNU, int DNV, int PN)
{
	const int v = blockIdx.x * blockDim.x + threadIdx.x;
	const int p = blockIdx.y * blockDim.y + threadIdx.y;
	if (v >= DNV || p >= PN)
		return;
	float* row = prj + p * DNU * DNV + v;
	for (int u = 1; u < DNU; ++u)
	{
		row[u * DNV] += row[(u - 1) * DNV];
	}
}
// Builds a per-view 2D summed-area table of the host projection data.
// Output layout: (DNU+1) x (DNV+1) x PN with a zero border at low U/V.
// Runs on the current device / default stream and returns the device buffer.
thrust::device_vector<float> genSAT_of_Projection(
float* hprj,
int DNU, int DNV, int PN)
{
	const int siz = DNU * DNV * PN;
	const int nsiz = (DNU + 1) * (DNV + 1) * PN;
	const int nDNU = DNU + 1;
	const int nDNV = DNV + 1;
	thrust::device_vector<float> prjSAT(nsiz, 0);
	thrust::device_vector<float> prj(hprj, hprj + siz);

	// Step 1: copy into the padded buffer (zero row/column at low U/V).
	const dim3 padBlk(64, 16, 1);
	const dim3 padGid(
		(DNV + padBlk.x - 1) / padBlk.x,
		(DNU + padBlk.y - 1) / padBlk.y,
		(PN + padBlk.z - 1) / padBlk.z);
	addOneSidedZeroBoarder << <padGid, padBlk >> >(
		thrust::raw_pointer_cast(&prj[0]),
		thrust::raw_pointer_cast(&prjSAT[0]),
		DNU, DNV, PN);

	// Step 2: inclusive prefix sum along V (fastest axis).
	const dim3 vertBlk(512, 1, 1);
	const dim3 vertGid((nDNU * PN + vertBlk.x - 1) / vertBlk.x, 1, 1);
	verticalIntegral2 << <vertGid, vertBlk >> >(
		thrust::raw_pointer_cast(&prjSAT[0]),
		nDNV, nDNU * PN);

	// Step 3: inclusive prefix sum along U.
	const dim3 horzBlk(64, 16, 1);
	const dim3 horzGid(
		(nDNV + horzBlk.x - 1) / horzBlk.x,
		(PN + horzBlk.y - 1) / horzBlk.y,
		1);
	heorizontalIntegral2 << <horzGid, horzBlk >> >(
		thrust::raw_pointer_cast(&prjSAT[0]),
		nDNU, nDNV, PN);
	return prjSAT;
}
// Allocates a 3D cudaArray of Width x Height x Depth floats, fills it from
// sourceData (host or device, per memcpyKind), and creates a texture object
// over it with the requested addressing/filtering/read modes.
//
// FIX: d_prjArray is now passed by reference. The original took the pointer
// by value, so the cudaArray allocated here never reached the caller and the
// caller's later cudaFreeArray() released a stale/uninitialized handle,
// leaking the array allocated here. Call sites compile unchanged.
void createTextureObject(
cudaTextureObject_t& texObj,
cudaArray*& d_prjArray,
int Width, int Height, int Depth,
float* sourceData,
cudaMemcpyKind memcpyKind,
cudaTextureAddressMode addressMode,
cudaTextureFilterMode textureFilterMode,
cudaTextureReadMode textureReadMode,
bool isNormalized)
{
	cudaExtent prjSize;
	prjSize.width = Width;
	prjSize.height = Height;
	prjSize.depth = Depth;
	cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
	cudaMalloc3DArray(&d_prjArray, &channelDesc, prjSize);
	// Stage the linear source buffer into the 3D array.
	cudaMemcpy3DParms copyParams = { 0 };
	copyParams.srcPtr = make_cudaPitchedPtr(
		(void*) sourceData, prjSize.width * sizeof(float),
		prjSize.width, prjSize.height);
	copyParams.dstArray = d_prjArray;
	copyParams.extent = prjSize;
	copyParams.kind = memcpyKind;
	cudaMemcpy3D(&copyParams);
	// Describe the backing resource and sampling behavior.
	cudaResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = cudaResourceTypeArray;
	resDesc.res.array.array = d_prjArray;
	cudaTextureDesc texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.addressMode[0] = addressMode;
	texDesc.addressMode[1] = addressMode;
	texDesc.addressMode[2] = addressMode;
	texDesc.filterMode = textureFilterMode;
	texDesc.readMode = textureReadMode;
	texDesc.normalizedCoords = isNormalized;
	cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
}
// Releases a texture object and the cudaArray backing it. Must be called on
// the same device (cudaSetDevice) that created them.
void destroyTextureObject(cudaTextureObject_t& texObj, cudaArray* d_array)
{
cudaDestroyTextureObject(texObj);
cudaFreeArray(d_array);
}
// Generic DD3 back-projection kernel. The primary template is intentionally
// an empty kernel; the actual algorithms are provided by explicit
// specializations on BackProjectionMethod (e.g. _BRANCHLESS, _PSEUDODD below).
template < BackProjectionMethod METHOD >
__global__ void DD3_gpu_back_ker(
cudaTextureObject_t prjTexObj,
float* vol,
const byte* __restrict__ msk,
const float3* __restrict__ cossinT,
float3 s,
float S2D,
float3 curvox,
float dx, float dz,
float dbeta, float ddv,
float2 detCntIdx,
int3 VN,
int PN, int squared)
{}
// Branchless DD back-projection. One thread reconstructs one voxel by
// accumulating, over all PN views, the projection footprint evaluated from
// the summed-area table held in prjTexObj: the four corner fetches with
// alternating signs recover the rectangle sum over [idxL,idxR]x[idxD,idxU].
//   s        : initial source position; per-view position comes from cossinT
//   curvox   : on entry, the fractional center index of the (sub)volume
//   detCntIdx: detector center indices (U in .x, V in .y)
//   VN       : sub-volume dimensions; PN: number of views
// NOTE(review): the `squared` parameter is never referenced in this body.
template<>
__global__ void DD3_gpu_back_ker<_BRANCHLESS>(
cudaTextureObject_t prjTexObj,
float* vol,
const byte* __restrict__ msk,
const float3* __restrict__ cossinT,
float3 s,
float S2D,
float3 curvox,
float dx, float dz,
float dbeta, float ddv,
float2 detCntIdx,
int3 VN,
int PN, int squared)
{
int3 id;
// z is the fastest-varying axis (matches volume layout (y*XN+x)*ZN+z).
id.z = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
id.x = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
id.y = threadIdx.z + __umul24(blockIdx.z, blockDim.z);
if (id.x < VN.x && id.y < VN.y && id.z < VN.z)
{
// Skip voxels outside the in-plane mask.
if (msk[id.y * VN.x + id.x] != 1)
return;
// Convert voxel index to physical coordinates relative to iso-center.
curvox = (id - curvox) * make_float3(dx, dx, dz);
float3 cursour;
float idxL, idxR, idxU, idxD;
float cosVal;
float summ = 0;
float3 cossin;
float inv_sid = 1.0 / sqrtf(s.x * s.x + s.y * s.y);
float3 dir;
float l_square;
float l;
float alpha;
float deltaAlpha;
// Pre-scale constants; ddv/dbeta/dz are reused as temporaries below.
S2D = S2D / ddv;
dbeta = 1.0 / dbeta;
dz = dz * 0.5;
for (int angIdx = 0; angIdx < PN; ++angIdx)
{
cossin = cossinT[angIdx];
// Rotate the source into this view; cossin.z carries the source z offset.
cursour = make_float3(
s.x * cossin.x - s.y * cossin.y,
s.x * cossin.y + s.y * cossin.x,
s.z + cossin.z);
dir = curvox - cursour;
l_square = dir.x * dir.x + dir.y * dir.y;
l = rsqrtf(l_square);
// Detector-row (V) indices of the voxel's top/bottom footprint edges.
idxU = (dir.z + dz) * S2D * l + detCntIdx.y + 1;
idxD = (dir.z - dz) * S2D * l + detCntIdx.y + 1;
// Fan angle of the voxel center as seen from the source.
alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * inv_sid * l);
// ddv is reused here as the dominant in-plane direction component.
if (fabsf(cursour.x) > fabsf(cursour.y))
{
ddv = dir.x;
}
else
{
ddv = dir.y;
}
deltaAlpha = ddv / l_square * dx * 0.5;
cosVal = dx / ddv * sqrtf(l_square + dir.z * dir.z);
// Detector-channel (U) indices of the left/right footprint edges.
idxL = (alpha - deltaAlpha) * dbeta + detCntIdx.x + 1;
idxR = (alpha + deltaAlpha) * dbeta + detCntIdx.x + 1;
// Four-corner SAT evaluation of the footprint rectangle, weighted by
// the ray-length factor cosVal.
summ +=
(-tex3D<float>(prjTexObj, idxD, idxR, angIdx + 0.5)
- tex3D<float>(prjTexObj, idxU, idxL, angIdx + 0.5)
+ tex3D<float>(prjTexObj, idxD, idxL, angIdx + 0.5)
+ tex3D<float>(prjTexObj, idxU, idxR, angIdx + 0.5)) * cosVal;
}
// NOTE(review): no shared memory is used in this kernel, so this barrier
// appears unnecessary — verify before removing.
__syncthreads();
vol[__umul24((__umul24(id.y, VN.x) + id.x), VN.z) + id.z] = summ;
}
}
// Pseudo distance-driven back-projection. One thread per voxel; for each
// view it samples the (linearly filtered) projection texture at the single
// point where the voxel center projects onto the detector, weighted by a
// ray-length factor — cheaper than the SAT/footprint variant above.
// NOTE(review): the `squared` parameter is never referenced in this body.
template<>
__global__ void DD3_gpu_back_ker<_PSEUDODD>(
cudaTextureObject_t texObj,
float* vol,
const byte* __restrict__ msk,
const float3* __restrict__ cossinZT,
float3 s,
float S2D,
float3 objCntIdx,
float dx, float dz, float dbeta, float ddv,
float2 detCntIdx,
int3 VN, int PN, int squared)
{
// k runs along z (fastest axis of the volume layout).
int k = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int i = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
int j = __mul24(blockIdx.z, blockDim.z) + threadIdx.z;
if (i < VN.x && j < VN.y && k < VN.z)
{
// Skip voxels outside the in-plane mask.
if (msk[j * VN.x + i] != 1)
return;
// Physical coordinates of the voxel center relative to iso-center.
float3 curVox = make_float3(
(i - objCntIdx.x) * dx,
(j - objCntIdx.y) * dx,
(k - objCntIdx.z) * dz);
float3 dir;
float3 cursour;
float invsid = rsqrtf(s.x * s.x + s.y * s.y);
float invl;
float idxZ;
float idxXY;
float alpha;
float cosVal;
float3 cossinT;
float summ = 0;
// Pre-scale constants; ddv is reused as a temporary below.
S2D = S2D / ddv;
dbeta = 1.0 / dbeta;
for (int angIdx = 0; angIdx != PN; ++angIdx)
{
cossinT = cossinZT[angIdx];
// Rotate the source into this view; cossinT.z is the source z offset.
cursour = make_float3(
s.x * cossinT.x - s.y * cossinT.y,
s.x * cossinT.y + s.y * cossinT.x,
s.z + cossinT.z);
dir = curVox - cursour;
// ddv is reused here as the squared in-plane distance.
ddv = dir.x * dir.x + dir.y * dir.y;
invl = rsqrtf(ddv);
// Detector row (V) index where the voxel center projects.
idxZ = dir.z * S2D * invl + detCntIdx.y + 0.5;
// Fan angle of the voxel center as seen from the source.
alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * invl * invsid);
if (fabsf(cursour.x) >= fabsf(cursour.y))
{
cosVal = fabsf(1.0 / dir.x);
}
else
{
cosVal = fabsf(1.0 / dir.y);
}
// Ray-length weighting for this voxel/view pair.
cosVal *= (dx * sqrtf(ddv + dir.z * dir.z));
idxXY = alpha * dbeta + detCntIdx.x + 0.5;
summ += tex3D<float>(texObj, idxZ, idxXY, angIdx + 0.5f) * cosVal;
}
// NOTE(review): no shared memory is used in this kernel, so this barrier
// appears unnecessary — verify before removing.
__syncthreads();
vol[(j * VN.x + i) * VN.z + k] = summ;
}
}
// Multi-GPU branchless DD back-projection using per-view 2D summed-area
// tables. The volume is split into Z-slabs by startVOL; each GPU receives
// the subset of views that can illuminate its slab, builds the projection
// SAT, binds it to a texture, and back-projects its slab. The slabs are
// finally merged into hvol by combineVolume.
//
// Parameters mirror DD3_gpu_proj_branchless_sat2d_multiGPU, with hprj as
// input and hvol as output; startVOL gives each GPU's first z-slice.
void DD3Back_branchless_sat2d_multiGPU(
float x0, float y0, float z0,
int DNU, int DNV,
float* xds, float* yds, float* zds,
float imgXCenter, float imgYCenter, float imgZCenter,
float* h_angs, float* h_zPos, int PN,
int XN, int YN, int ZN,
float* hvol, float* hprj,
float dx, float dz,
byte* mask,const int* startVOL, int gpuNum)
{
const int nDNU = DNU + 1;
const int nDNV = DNV + 1;
thrust::host_vector<float> hangs(h_angs, h_angs + PN);
thrust::host_vector<float> hzPos(h_zPos, h_zPos + PN);
// Z-slab boundaries per GPU, derived from startVOL.
std::vector<int> ObjZIdx_Start(startVOL, startVOL + gpuNum);
std::vector<int> ObjZIdx_End(ObjZIdx_Start.size());
std::copy(ObjZIdx_Start.begin() + 1, ObjZIdx_Start.end(), ObjZIdx_End.begin());
ObjZIdx_End[gpuNum - 1] = ZN;
std::vector<int> prjIdx_Start(gpuNum);
std::vector<int> prjIdx_End(gpuNum);
const float objCntIdxZ = (ZN - 1.0f) * 0.5 - imgZCenter / dz;
const float detStpZ = (zds[DNV - 1] - zds[0]) / (DNV - 1.0f); // detector cell height
const float detCntIdxV = -zds[0] / detStpZ; // Detector Center along Z direction
// NOTE(review): element-wise vector operator- is a project-provided helper.
std::vector<int> SZN = ObjZIdx_End - ObjZIdx_Start; // sub volume slices number
std::vector<float> subImgZCenter(gpuNum,0.0f);
std::vector<int> SPN(gpuNum);
const float objCntIdxX = (XN - 1.0f) * 0.5f - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0f) * 0.5f - imgYCenter / dx;
std::vector<float3> sour(gpuNum);
thrust::host_vector<thrust::device_vector<byte> > msk(gpuNum);
thrust::host_vector<thrust::device_vector<float> > vol(gpuNum);
thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
thrust::host_vector<cudaArray*> d_prjArray(gpuNum);
thrust::host_vector<cudaTextureObject_t> texObj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prjSAT(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
thrust::host_vector<cudaStream_t> stream(gpuNum);
// Detector geometry: (detCtrIdxU, detCtrIdxV, dbeta, ddv).
const float4 detParas = calDetParas(xds, yds, zds, x0, y0, z0, DNU, DNV);
const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
// Pre calculate the cossin z positions
thrust::device_vector<float3> COSSINZT(PN);
thrust::device_vector<float> ANGS = hangs;
thrust::device_vector<float> ZPOS = hzPos;
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(ANGS.begin(), ZPOS.begin())),
thrust::make_zip_iterator(thrust::make_tuple(ANGS.end(), ZPOS.end())),
COSSINZT.begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
dim3 copyBlk(64,16,1);
thrust::host_vector<dim3> copyGid(gpuNum);
dim3 blk(BACK_BLKX, BACK_BLKY, BACK_BLKZ);
thrust::host_vector<dim3> gid(gpuNum);
dim3 vertGenBlk(512,1,1);
thrust::host_vector<dim3> vertGenGid(gpuNum);
dim3 horzGenBlk(64,16,1);
thrust::host_vector<dim3> horzGenGid(gpuNum);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
thrust::host_vector<thrust::host_vector<float> > subVol(gpuNum);
std::vector<size_t> siz(gpuNum,0);
std::vector<size_t> nsiz(gpuNum,0);
// One OpenMP thread per GPU prepares that device's SAT and textures.
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
// get projection view index pair
getPrjIdxPair<float>(hzPos, ObjZIdx_Start[i], ObjZIdx_End[i],
objCntIdxZ, dz, ZN, detCntIdxV, detStpZ, DNV,
prjIdx_Start[i], prjIdx_End[i]);
SPN[i] = prjIdx_End[i] - prjIdx_Start[i];
//std::cout<<i<<" "<<prjIdx_Start[i]<<" "<<prjIdx_End[i]<<"\n";
// Calculate the corresponding center position index of the sub volumes
subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjZIdx_Start[i] - 0.5f; // index position
cudaSetDevice(i);
cudaStreamCreate(&stream[i]);
// Generate the SAT for the projection data
siz[i] = DNU * DNV * SPN[i];
nsiz[i] = (DNU + 1) * (DNV + 1) * SPN[i];
prjSAT[i].resize(nsiz[i]);
prj[i].resize(siz[i]);
thrust::copy(
hprj + DNU * DNV * prjIdx_Start[i],
hprj + DNU * DNV * prjIdx_End[i],
prj[i].begin());
copyGid[i].x = (DNV + copyBlk.x - 1) / copyBlk.x;
copyGid[i].y = (DNU + copyBlk.y - 1) / copyBlk.y;
copyGid[i].z = (SPN[i] + copyBlk.z - 1) / copyBlk.z;
addOneSidedZeroBoarder<<<copyGid[i], copyBlk, 0, stream[i]>>>(
thrust::raw_pointer_cast(&prj[i][0]),
thrust::raw_pointer_cast(&prjSAT[i][0]),
DNU, DNV, SPN[i]);
//cudaStreamSynchronize(stream[i]);
// NOTE(review): this divides by copyBlk.x (64) although the launch below
// uses vertGenBlk (512) — the grid is ~8x larger than needed. Harmless
// because verticalIntegral2 bounds-checks, but should be vertGenBlk.x.
vertGenGid[i].x = (nDNU * SPN[i] + vertGenBlk.x - 1) / copyBlk.x;
vertGenGid[i].y = 1;
vertGenGid[i].z = 1;
verticalIntegral2 << <vertGenGid[i], vertGenBlk, 0, stream[i] >> >(
thrust::raw_pointer_cast(&prjSAT[i][0]), nDNV, nDNU * SPN[i]);
horzGenGid[i].x = (nDNV + horzGenBlk.x - 1) / horzGenBlk.x;
horzGenGid[i].y = (SPN[i] + horzGenBlk.y - 1) / horzGenBlk.y;
horzGenGid[i].z = 1;
heorizontalIntegral2 << <horzGenGid[i], horzGenBlk,0,stream[i] >> >(
thrust::raw_pointer_cast(&prjSAT[i][0]), nDNU, nDNV, SPN[i]);
prj[i].clear();
// Upload the SAT into a 3D cudaArray and wrap it in a clamped,
// linearly-interpolated texture for the back-projection kernel.
cudaExtent prjSize;
prjSize.width = DNV + 1;
prjSize.height = DNU + 1;
prjSize.depth = SPN[i];
cudaMalloc3DArray(&d_prjArray[i], &channelDesc, prjSize);
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr(
(void*) thrust::raw_pointer_cast(&prjSAT[i][0]),
prjSize.width * sizeof(float),
prjSize.width, prjSize.height);
copyParams.dstArray = d_prjArray[i];
copyParams.extent = prjSize;
copyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3DAsync(&copyParams,stream[i]);
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_prjArray[i];
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
cudaCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
prjSAT[i].clear();
// The part above are for branchless DD
gid[i].x = (SZN[i] + blk.x - 1) / blk.x;
gid[i].y = (XN + blk.y - 1) / blk.y;
gid[i].z = (YN + blk.z - 1) / blk.z;
vol[i].resize(XN * YN * SZN[i]);
msk[i].resize(XN * YN);
thrust::copy(mask, mask + XN * YN, msk[i].begin());
cossinZT[i].resize(SPN[i]);
thrust::copy(
COSSINZT.begin() + prjIdx_Start[i],
COSSINZT.begin() + prjIdx_End[i],
cossinZT[i].begin());
}
// Back-project each GPU's slab with the branchless (SAT) kernel.
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
DD3_gpu_back_ker<_BRANCHLESS> << <gid[i], blk, 0, stream[i] >> >(texObj[i],
thrust::raw_pointer_cast(&vol[i][0]), thrust::raw_pointer_cast(&msk[i][0]),
thrust::raw_pointer_cast(&cossinZT[i][0]), make_float3(x0, y0, z0), S2D,
make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]), // have to be changed
dx, dz, detParas.z, detParas.w, make_float2(detParas.x, detParas.y),
make_int3(XN, YN, SZN[i]), SPN[i], 0);
}
// NOTE(review): a standalone omp barrier outside a parallel region appears
// to have no effect; the preceding parallel-for already joins its threads.
#pragma omp barrier
#pragma omp parallel for
for(int i = 0 ;i < gpuNum; ++i)
{
cudaSetDevice(i);
// copy the volume back.
subVol[i].resize(XN * YN * SZN[i]);
thrust::copy(vol[i].begin(), vol[i].end(), subVol[i].begin());
vol[i].clear();
msk[i].clear();
cossinZT[i].clear();
cudaDestroyTextureObject(texObj[i]);
cudaFreeArray(d_prjArray[i]);
}
// NOTE(review): synchronizes only the current device; the per-GPU work was
// already serialized by the blocking thrust::copy above — verify.
cudaDeviceSynchronize();
// Merge the per-GPU slabs into the full output volume.
combineVolume<float>(hvol, XN, YN, ZN, subVol, &(SZN[0]), gpuNum);
// NOTE(review): the CUDA streams are never destroyed (clear() only empties
// the vector) — a per-call resource leak.
hangs.clear();
hzPos.clear();
ObjZIdx_Start.clear();
ObjZIdx_End.clear();
prjIdx_Start.clear();
prjIdx_End.clear();
SZN.clear();
subImgZCenter.clear();
SPN.clear();
sour.clear();
msk.clear();
vol.clear();
cossinZT.clear();
d_prjArray.clear();
texObj.clear();
prjSAT.clear();
prj.clear();
stream.clear();
COSSINZT.clear();
ANGS.clear();
ZPOS.clear();
copyGid.clear();
gid.clear();
vertGenGid.clear();
horzGenGid.clear();
}
// Multi-GPU pseudo distance-driven back-projection. Same slab partitioning
// and per-GPU view selection as DD3Back_branchless_sat2d_multiGPU, but the
// raw projection data is bound to a border-addressed texture directly (no
// summed-area table) and the cheaper _PSEUDODD kernel is used.
void DD3Back_pseudo_multiGPU(
float x0, float y0, float z0,
int DNU, int DNV,
float* xds, float* yds, float* zds,
float imgXCenter, float imgYCenter, float imgZCenter,
float* h_angs, float* h_zPos, int PN,
int XN, int YN, int ZN,
float* hvol, float* hprj,
float dx, float dz,
byte* mask,const int* startVOL, int gpuNum)
{
thrust::host_vector<float> hangs(h_angs, h_angs + PN);
thrust::host_vector<float> hzPos(h_zPos, h_zPos + PN);
// Z-slab boundaries per GPU, derived from startVOL.
std::vector<int> ObjZIdx_Start(startVOL, startVOL + gpuNum);
std::vector<int> ObjZIdx_End(ObjZIdx_Start.size());
std::copy(ObjZIdx_Start.begin() + 1, ObjZIdx_Start.end(), ObjZIdx_End.begin());
ObjZIdx_End[gpuNum - 1] = ZN;
std::vector<int> prjIdx_Start(gpuNum);
std::vector<int> prjIdx_End(gpuNum);
const float objCntIdxZ = (ZN - 1.0f) * 0.5 - imgZCenter / dz;
const float detStpZ = (zds[DNV - 1] - zds[0]) / (DNV - 1.0f); // detector cell height
const float detCntIdxV = -zds[0] / detStpZ; // Detector Center along Z direction
// NOTE(review): element-wise vector operator- is a project-provided helper.
std::vector<int> SZN = ObjZIdx_End - ObjZIdx_Start; // sub volume slices number
std::vector<float> subImgZCenter(gpuNum,0.0f);
std::vector<int> SPN(gpuNum);
const float objCntIdxX = (XN - 1.0f) * 0.5f - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0f) * 0.5f - imgYCenter / dx;
std::vector<float3> sour(gpuNum);
thrust::host_vector<thrust::device_vector<byte> > msk(gpuNum);
thrust::host_vector<thrust::device_vector<float> > vol(gpuNum);
thrust::host_vector<thrust::device_vector<float3> > cossinZT(gpuNum);
thrust::host_vector<cudaArray*> d_prjArray(gpuNum);
thrust::host_vector<cudaTextureObject_t> texObj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
thrust::host_vector<cudaStream_t> stream(gpuNum);
// Detector geometry: (detCtrIdxU, detCtrIdxV, dbeta, ddv).
const float4 detParas = calDetParas(xds, yds, zds, x0, y0, z0, DNU, DNV);
const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
// Pre calculate the cossin z positions
thrust::device_vector<float3> COSSINZT(PN);
thrust::device_vector<float> ANGS = hangs;
thrust::device_vector<float> ZPOS = hzPos;
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(ANGS.begin(), ZPOS.begin())),
thrust::make_zip_iterator(thrust::make_tuple(ANGS.end(), ZPOS.end())),
COSSINZT.begin(), CTMBIR::ConstantForBackProjection4(x0, y0, z0));
dim3 copyBlk(64,16,1);
thrust::host_vector<dim3> copyGid(gpuNum);
dim3 blk(BACK_BLKX, BACK_BLKY, BACK_BLKZ);
thrust::host_vector<dim3> gid(gpuNum);
dim3 vertGenBlk(512,1,1);
thrust::host_vector<dim3> vertGenGid(gpuNum);
dim3 horzGenBlk(64,16,1);
thrust::host_vector<dim3> horzGenGid(gpuNum);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
thrust::host_vector<thrust::host_vector<float> > subVol(gpuNum);
std::vector<size_t> siz(gpuNum,0);
std::vector<size_t> nsiz(gpuNum,0);
// One OpenMP thread per GPU prepares that device's texture and buffers.
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
// get projection view index pair
getPrjIdxPair<float>(hzPos, ObjZIdx_Start[i], ObjZIdx_End[i],
objCntIdxZ, dz, ZN, detCntIdxV, detStpZ, DNV,
prjIdx_Start[i], prjIdx_End[i]);
SPN[i] = prjIdx_End[i] - prjIdx_Start[i];
//std::cout<<i<<" "<<prjIdx_Start[i]<<" "<<prjIdx_End[i]<<"\n";
// Calculate the corresponding center position index of the sub volumes
subImgZCenter[i] = -imgZCenter / dz + ZN * 0.5 - ObjZIdx_Start[i] - 0.5f; // index position
cudaSetDevice(i);
cudaStreamCreate(&stream[i]);
////////////////////////////////////////////////////////////////////////
// Upload this GPU's raw projection views into a 3D cudaArray and wrap
// it in a border-addressed, linearly-interpolated texture (no SAT).
siz[i] = DNU * DNV * SPN[i];
prj[i].resize(siz[i]);
thrust::copy(
hprj + DNU * DNV * prjIdx_Start[i],
hprj + DNU * DNV * prjIdx_End[i],
prj[i].begin());
cudaExtent prjSize;
prjSize.width = DNV;
prjSize.height = DNU;
prjSize.depth = SPN[i];
cudaMalloc3DArray(&d_prjArray[i], &channelDesc, prjSize);
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr(
(void*) thrust::raw_pointer_cast(&prj[i][0]),
prjSize.width * sizeof(float),
prjSize.width, prjSize.height);
copyParams.dstArray = d_prjArray[i];
copyParams.extent = prjSize;
copyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3DAsync(&copyParams,stream[i]);
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_prjArray[i];
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.addressMode[2] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
cudaCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
prj[i].clear();
////////////////////////////////////////////////////////////////////////
// Generate the SAT for the projection data
// The part above are for branchless DD
gid[i].x = (SZN[i] + blk.x - 1) / blk.x;
gid[i].y = (XN + blk.y - 1) / blk.y;
gid[i].z = (YN + blk.z - 1) / blk.z;
vol[i].resize(XN * YN * SZN[i]);
msk[i].resize(XN * YN);
thrust::copy(mask, mask + XN * YN, msk[i].begin());
cossinZT[i].resize(SPN[i]);
thrust::copy(
COSSINZT.begin() + prjIdx_Start[i],
COSSINZT.begin() + prjIdx_End[i],
cossinZT[i].begin());
}
// Back-project each GPU's slab with the pseudo-DD kernel.
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
DD3_gpu_back_ker<_PSEUDODD> << <gid[i], blk, 0, stream[i] >> >(texObj[i],
thrust::raw_pointer_cast(&vol[i][0]), thrust::raw_pointer_cast(&msk[i][0]),
thrust::raw_pointer_cast(&cossinZT[i][0]), make_float3(x0, y0, z0), S2D,
make_float3(objCntIdxX, objCntIdxY, subImgZCenter[i]), // have to be changed
dx, dz, detParas.z, detParas.w, make_float2(detParas.x, detParas.y),
make_int3(XN, YN, SZN[i]), SPN[i], 0);
}
// NOTE(review): a standalone omp barrier outside a parallel region appears
// to have no effect; the preceding parallel-for already joins its threads.
#pragma omp barrier
#pragma omp parallel for
for (int i = 0; i < gpuNum; ++i)
{
// NOTE(review): unlike the branchless variant, this loop does NOT call
// cudaSetDevice(i) before destroying device-i resources — the texture
// object and array may be destroyed from the wrong device context.
// copy the volume back.
subVol[i].resize(XN * YN * SZN[i]);
thrust::copy(vol[i].begin(), vol[i].end(), subVol[i].begin());
vol[i].clear();
msk[i].clear();
cossinZT[i].clear();
cudaDestroyTextureObject(texObj[i]);
cudaFreeArray(d_prjArray[i]);
}
// NOTE(review): synchronizes only the current device.
cudaDeviceSynchronize();
// Merge the per-GPU slabs into the full output volume.
combineVolume<float>(hvol, XN, YN, ZN, subVol, &(SZN[0]), gpuNum);
// NOTE(review): the CUDA streams are never destroyed (clear() only empties
// the vector) — a per-call resource leak.
hangs.clear();
hzPos.clear();
ObjZIdx_Start.clear();
ObjZIdx_End.clear();
prjIdx_Start.clear();
prjIdx_End.clear();
SZN.clear();
subImgZCenter.clear();
SPN.clear();
sour.clear();
msk.clear();
vol.clear();
cossinZT.clear();
d_prjArray.clear();
texObj.clear();
prj.clear();
stream.clear();
COSSINZT.clear();
ANGS.clear();
ZPOS.clear();
copyGid.clear();
gid.clear();
vertGenGid.clear();
horzGenGid.clear();
}
extern "C"
void DD3Back_multiGPU(
float x0, float y0, float z0,
int DNU, int DNV,
float* xds, float* yds, float* zds,
float imgXCenter, float imgYCenter, float imgZCenter,
float* hangs, float* hzPos, int PN,
int XN, int YN, int ZN,
float* hvol, float* hprj,
float dx, float dz,
byte* mask, int bakMode,const int* startVOL, int gpuNum)
{
	// Dispatch to the requested multi-GPU backprojector.
	// bakMode == 0 selects the branchless DD (SAT-based) variant; any
	// other value falls back to the pseudo / volume-rendering variant.
	if (bakMode == 0)
	{
		DD3Back_branchless_sat2d_multiGPU(x0, y0, z0,
			DNU, DNV, xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj,
			dx, dz, mask, startVOL, gpuNum);
	}
	else
	{
		DD3Back_pseudo_multiGPU(x0, y0, z0,
			DNU, DNV, xds, yds, zds, imgXCenter, imgYCenter, imgZCenter,
			hangs, hzPos, PN, XN, YN, ZN, hvol, hprj,
			dx, dz, mask, startVOL, gpuNum);
	}
}
|
a946222652cef0d054b9daa78e46b43e4bbb24e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
// Device-heap demo kernel: allocate 8 bytes, store 5, reinterpret the low
// 16 bits through a short*, and print them. The y parameter is unused.
// Fixes:
//  - malloc() returns void*, which C++ (hipcc/nvcc) will not implicitly
//    convert to long long*; an explicit cast is required to compile
//    (this was the error recorded in the trailing comment of this file).
//  - the allocation is now null-checked and freed (device heap is small).
__global__ void fun(int *y){
    long long *p = (long long *)malloc(sizeof(long long));
    if (p == NULL) return;  // device heap exhausted
    *p = 5;
    *p = *(short*)p;  // keep only the low 16 bits, store back widened
    printf("%d\n", *(short*)p);
    free(p);
}
// Host driver: allocates one device int, launches fun() with a single
// thread, copies the int back, and exits.
int main(void)
{
int y;
int *dev_y;
hipMalloc((void**)&dev_y, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_y);
// NOTE(review): fun() never writes through its argument, so y receives
// uninitialized device memory; the blocking hipMemcpy also serves as the
// implicit synchronization point for the kernel launch above.
hipMemcpy(&y, dev_y, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_y);
return 0;
}
// compiles under gcc 5; nvcc: error: a value of type "void *" cannot be used to initialize an entity of type "long long *";
| a946222652cef0d054b9daa78e46b43e4bbb24e3.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
// Device-heap demo kernel: allocate 8 bytes, store 5, reinterpret the low
// 16 bits through a short*, and print them. The y parameter is unused.
// Fixes:
//  - malloc() returns void*, which C++ (nvcc) will not implicitly convert
//    to long long*; an explicit cast is required to compile (this was the
//    exact error recorded in the trailing comment of this file).
//  - the allocation is now null-checked and freed (device heap is small).
__global__ void fun(int *y){
    long long *p = (long long *)malloc(sizeof(long long));
    if (p == NULL) return;  // device heap exhausted
    *p = 5;
    *p = *(short*)p;  // keep only the low 16 bits, store back widened
    printf("%d\n", *(short*)p);
    free(p);
}
// Host driver: allocates one device int, launches fun() with a single
// thread, copies the int back, and exits.
int main(void)
{
int y;
int *dev_y;
cudaMalloc((void**)&dev_y, sizeof(int));
fun<<<1,1>>>(dev_y);
// NOTE(review): fun() never writes through its argument, so y receives
// uninitialized device memory; the blocking cudaMemcpy also serves as the
// implicit synchronization point for the kernel launch above.
cudaMemcpy(&y, dev_y, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_y);
return 0;
}
// compiles successfully; gcc: 5; nvcc: error: a value of type "void *" cannot be used to initialize an entity of type "long long *";
|
367b7c28ca1b71e7e2afabbad37cc9c90691ff2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include "circuit.h"
#include "device.h"
using namespace std;
//extern CIRCUIT Circuit;
// Allocate all host mirrors and device buffers for the decoder's flattened
// message arrays: per-bit-node Lcx messages, per-check-node Lxc messages,
// the ring-link Next*Index arrays, the cross-side *Position maps, and the
// decoded-output buffer. Sizes: n_Total bit nodes, k_Total check nodes,
// TotalEdge edges. Element type is double or float depending on DOUBLE.
// NOTE(review): hipMalloc return codes are unchecked — a failed allocation
// only surfaces at a later API call. No matching free is visible in this
// file; confirm ownership/cleanup elsewhere.
void CIRCUIT::CUDA_MemoryAllocate(){
#ifdef PROFILE
Timer.TimerStart();
#endif
CudaData = new CUDA_DATA;
CudaData->h_DecodedData = new int[n_Total]; //redundant, for CPU debug
hipMalloc((void**)&CudaData->d_DecodedData, n_Total * sizeof(int));
////// BitNode on host //////
CudaData->h_LcxSize = new int [n_Total]; //redundant?
CudaData->h_LcxBegin = new int [n_Total];
#ifdef DOUBLE
CudaData->h_Lcx = new double [TotalEdge];
CudaData->h_Lint = new double [TotalEdge]; //v2
#else
CudaData->h_Lcx = new float [TotalEdge];
CudaData->h_Lint = new float [TotalEdge];
#endif
CudaData->h_NextLcxIndex = new int [TotalEdge]; //v2
CudaData->h_LxcPosition = new int [TotalEdge];
////// CheckNode on host //////
CudaData->h_LxcSize = new int [k_Total]; //redundant?
#ifdef DOUBLE
CudaData->h_Lxc = new double [TotalEdge];
#else
CudaData->h_Lxc = new float [TotalEdge];
#endif
CudaData->h_LxcBegin = new int [k_Total];
CudaData->h_NextLxcIndex = new int [TotalEdge]; //v2
CudaData->h_LcxPosition = new int [TotalEdge];
////// BitNode on device //////
hipMalloc((void**)&CudaData->d_LcxSize, n_Total * sizeof(int)); //redundant?
hipMalloc((void**)&CudaData->d_LcxBegin, n_Total * sizeof(int)); //redundant?
#ifdef DOUBLE
hipMalloc((void**)&CudaData->d_Lcx, TotalEdge * sizeof(double));
hipMalloc((void**)&CudaData->d_Lint, TotalEdge * sizeof(double));
#else
hipMalloc((void**)&CudaData->d_Lcx, TotalEdge * sizeof(float));
hipMalloc((void**)&CudaData->d_Lint, TotalEdge * sizeof(float));
#endif
hipMalloc((void**)&CudaData->d_NextLcxIndex, TotalEdge * sizeof(int));
hipMalloc((void**)&CudaData->d_LxcPosition, TotalEdge * sizeof(int));
////// CheckNode on device //////
hipMalloc((void**)&CudaData->d_LxcSize, k_Total * sizeof(int)); //redundant?
#ifdef DOUBLE
hipMalloc((void**)&CudaData->d_Lxc, TotalEdge * sizeof(double));
#else
hipMalloc((void**)&CudaData->d_Lxc, TotalEdge * sizeof(float));
#endif
hipMalloc((void**)&CudaData->d_LxcBegin, k_Total * sizeof(int)); //redundant?
hipMalloc((void**)&CudaData->d_NextLxcIndex, TotalEdge * sizeof(int));
hipMalloc((void**)&CudaData->d_LcxPosition, TotalEdge * sizeof(int));
#ifdef PROFILE
Timer.TimerFinish(Timer.tCUDA_MemoryAllocate);
#endif
}
// Build the flattened graph-structure arrays from the BitNode/CheckNode
// objects: per-node begin offsets, circular NextLcxIndex/NextLxcIndex rings
// linking the edges of each node, and LcxPosition/LxcPosition maps that
// translate an edge's slot on one side of the graph into its slot on the
// other side. Must run before the message-update passes that walk these
// rings.
// NOTE(review): the ring-closing write h_Next*Index[Index-1] = Begin
// assumes every node has at least one edge; a degree-0 node would corrupt
// the previous node's ring — confirm the graph never contains such nodes.
void CIRCUIT::CUDA_CreateDataArray(){
#ifdef PROFILE
Timer.TimerStart();
#endif
int Begin=0;
int Index=0; //v2
///// BitNode /////
for(unsigned i=0; i<n_Total; i++){
CudaData->h_LcxSize[i] = BitNode[i]->Lcx.size();
CudaData->h_LcxBegin[i] = Begin;
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){ //v2
CudaData->h_NextLcxIndex[Index] = Index+1;
Index++;
}
// close the ring: the node's last edge points back to its first edge
CudaData->h_NextLcxIndex[Index-1] = Begin;
Begin=Begin+BitNode[i]->Lcx.size();
}
///// CheckNode //////
Begin=0;
Index=0; //v2
for(unsigned i=0; i<k_Total; i++){
CudaData->h_LxcSize[i] = CheckNode[i]->Lxc.size();
CudaData->h_LxcBegin[i] = Begin;
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++){
CudaData->h_LcxPosition[Begin+j] = CudaData->h_LcxBegin[CheckNode[i]->BitNode_H[j]->ID] + CheckNode[i]->Lcx_position[j];
CudaData->h_NextLxcIndex[Index] = Index + 1;
Index++;
}
CudaData->h_NextLxcIndex[Index-1] = Begin;
Begin=Begin+CheckNode[i]->Lxc.size();
}
///// BitNode /////
// reverse map: for each bit-node edge, where its message lands in Lxc
for(unsigned i=0; i<n_Total; i++){
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++)
CudaData->h_LxcPosition[CudaData->h_LcxBegin[i]+j] = CudaData->h_LxcBegin[BitNode[i]->CheckNode_H[j]->ID] + BitNode[i]->Lxc_position[j];
}
///// Debug /////
//Index = 0;
/*for(unsigned i=0; i<n; i++){
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){ //v2
cout<<CudaData->h_NextLcxIndex[Index]<<" ";
Index++;
}
cout<<endl;
}*/
/*for(unsigned i=0; i<m; i++){
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++){
cout<<CudaData->h_NextLxcIndex[Index]<<" ";
Index++;
}
cout<<endl;
}
cout<<"TotalEdge = "<<TotalEdge<<endl;
char a;
cin>>a;*/
#ifdef PROFILE
Timer.TimerFinish(Timer.tCUDA_CreateDataArray);
#endif
}
// Fill the per-decode message values into the packed host arrays:
// replicate each bit node's intrinsic LLR (Lint) across all of that
// node's edge slots, and copy every check node's Lxc messages. Uses the
// h_LcxBegin / h_LxcBegin offsets produced by CUDA_CreateDataArray.
void CIRCUIT::CUDA_CreateDataArray2(){
#ifdef PROFILE
Timer.TimerStart();
#endif
for(unsigned i=0; i<n_Total; i++){
//CudaData->h_Lint[i] = BitNode[i]->Lint;
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){
//CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j] = BitNode[i]->Lcx[j];
CudaData->h_Lint[CudaData->h_LcxBegin[i]+j] = BitNode[i]->Lint;
}
}
for(unsigned i=0; i<k_Total; i++)
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++)
CudaData->h_Lxc[CudaData->h_LxcBegin[i]+j] = CheckNode[i]->Lxc[j];
#ifdef PROFILE
Timer.TimerFinish(Timer.tCUDA_CreateDataArray2);
#endif
}
// CPU reference for the check-to-bit (min-sum) message update.
// For every edge, walk the ring of sibling edges on the same check node
// (via h_NextLxcIndex, skipping the edge itself), combining their Lxc
// values: the product of signs times the minimum magnitude becomes the
// outgoing Lcx message, written through the h_LcxPosition map.
void CIRCUIT::UpdateLcx_CPU(){
for(int edge = 0; edge < TotalEdge; edge++){
double signProd = 1;
double magMin = 1000;
for(int k = CudaData->h_NextLxcIndex[edge]; k != edge; k = CudaData->h_NextLxcIndex[k]){
double lxc = CudaData->h_Lxc[k];
if(!(lxc > 0))
signProd = -signProd;
magMin = min(magMin, fabs(lxc));
}
CudaData->h_Lcx[CudaData->h_LcxPosition[edge]] = signProd * magMin;
}
}
// CPU reference for the bit-to-check message update: for each edge, sum
// the Lcx messages of the other edges on the same bit node (ring walk via
// h_NextLcxIndex, skipping the edge itself), add the intrinsic LLR, and
// write the result to the paired Lxc slot given by h_LxcPosition.
void CIRCUIT::UpdateLxc_CPU(){
for(int i=0; i<TotalEdge; i++){
double sumLcx=0;
int Index = CudaData->h_NextLcxIndex[i];
while(Index != i){
sumLcx = sumLcx + CudaData->h_Lcx[Index];
Index = CudaData->h_NextLcxIndex[Index];
}
CudaData->h_Lxc[CudaData->h_LxcPosition[i]] = CudaData->h_Lint[i] + sumLcx;
}
}
// CPU reference for the hard decision: per bit node, sum all incoming Lcx
// messages plus the intrinsic LLR (replicated per edge, read from the
// node's first edge slot); decode bit 0 when the posterior LLR >= 0,
// otherwise 1.
void CIRCUIT::Calculate_Posterior_CPU(){
for(int i=0; i<n_Total; i++){
double sumLcx=0;
for(int j=0; j<CudaData->h_LcxSize[i]; j++)
sumLcx = sumLcx + CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j];
if(CudaData->h_Lint[CudaData->h_LcxBegin[i]] + sumLcx >= 0)
CudaData->h_DecodedData[i] = 0;
else
CudaData->h_DecodedData[i] = 1;
}
}
// One-time upload of the static graph structure to the device: node sizes,
// begin offsets, ring links (Next*Index) and cross-side position maps.
// All copies are blocking hipMemcpy calls; return codes are not checked.
void CIRCUIT::MemoryCopy_H2D(){
#ifdef PROFILE
Timer.TimerStart();
#endif
////// BitNode on device //////
hipMemcpy(CudaData->d_LcxSize, CudaData->h_LcxSize, n_Total * sizeof(int), hipMemcpyHostToDevice);//redundant
hipMemcpy(CudaData->d_LcxBegin, CudaData->h_LcxBegin, n_Total * sizeof(int), hipMemcpyHostToDevice);//redundant
hipMemcpy(CudaData->d_NextLcxIndex, CudaData->h_NextLcxIndex, TotalEdge * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(CudaData->d_LxcPosition, CudaData->h_LxcPosition, TotalEdge * sizeof(int), hipMemcpyHostToDevice);
////// CheckNode on device //////
hipMemcpy(CudaData->d_LxcSize, CudaData->h_LxcSize, k_Total * sizeof(int), hipMemcpyHostToDevice);//redundant
hipMemcpy(CudaData->d_LxcBegin, CudaData->h_LxcBegin, k_Total * sizeof(int), hipMemcpyHostToDevice);//redundant
hipMemcpy(CudaData->d_NextLxcIndex, CudaData->h_NextLxcIndex, TotalEdge * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(CudaData->d_LcxPosition, CudaData->h_LcxPosition, TotalEdge * sizeof(int), hipMemcpyHostToDevice);
#ifdef PROFILE
Timer.TimerFinish(Timer.tMemoryCopy_H2D);
#endif
}
// Per-decode upload of the dynamic message values: the per-edge intrinsic
// LLRs (h_Lint) and the initial check-to-bit messages (h_Lxc). Element
// width follows the DOUBLE compile switch.
void CIRCUIT::MemoryCopy_H2D2(){
#ifdef PROFILE
Timer.TimerStart();
#endif
#ifdef DOUBLE
////// BitNode on device //////
//hipMemcpy(CudaData->d_Lcx, CudaData->h_Lcx, TotalEdge * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(CudaData->d_Lint, CudaData->h_Lint, TotalEdge * sizeof(double), hipMemcpyHostToDevice);
////// CheckNode on device //////
hipMemcpy(CudaData->d_Lxc, CudaData->h_Lxc, TotalEdge * sizeof(double), hipMemcpyHostToDevice);
#else
hipMemcpy(CudaData->d_Lint, CudaData->h_Lint, TotalEdge * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(CudaData->d_Lxc, CudaData->h_Lxc, TotalEdge * sizeof(float), hipMemcpyHostToDevice);
#endif
#ifdef PROFILE
Timer.TimerFinish(Timer.tMemoryCopy_H2D2);
#endif
}
// Download the hard-decision output (d_DecodedData) into the host-side
// DecodedData buffer; the blocking hipMemcpy also acts as the
// synchronization point after the decode kernels.
void CIRCUIT::MemoryCopy_D2H(){
#ifdef PROFILE
Timer.TimerStart();
#endif
//hipMemcpy(CudaData->h_Lcx, CudaData->d_Lcx, TotalEdge * sizeof(double), hipMemcpyDeviceToHost);
//hipMemcpy(CudaData->h_DecodedData, CudaData->d_DecodedData, n * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(DecodedData, CudaData->d_DecodedData, n_Total * sizeof(int), hipMemcpyDeviceToHost);
#ifdef PROFILE
Timer.TimerFinish(Timer.tMemoryCopy_D2H);
#endif
}
// Check-to-bit (min-sum) message update kernel. Grid-stride loop over all
// edges; each thread walks the ring of sibling edges on the same check
// node (d_NextLxcIndex), accumulating the sign product and minimum
// magnitude of their Lxc values, then writes the result into the paired
// Lcx slot via d_LcxPosition. Element type follows the DOUBLE switch.
#ifdef DOUBLE
__global__ void UpdateLcx_GPU(double* d_Lxc, int* d_NextLxcIndex, int* d_LcxPosition, double* d_Lcx, int TotalEdge){
#else
__global__ void UpdateLcx_GPU(float* d_Lxc, int* d_NextLxcIndex, int* d_LcxPosition, float* d_Lcx, int TotalEdge){
#endif
int total_task = gridDim.x * blockDim.x;
int task_sn = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=task_sn; i<TotalEdge; i+=total_task){
#ifdef DOUBLE
double sgn=1;
double minLxc=1000;
#else
float sgn=1;
float minLxc=1000;
#endif
int Index = d_NextLxcIndex[i];
while(Index != i){
/*if(d_Lxc[Index] > 0)
sgn = sgn*1;
else
sgn = sgn*(-1);*/
if(d_Lxc[Index] < 0)
sgn = sgn*(-1);
minLxc = min(minLxc, fabs(d_Lxc[Index]));
Index = d_NextLxcIndex[Index];
}
d_Lcx[d_LcxPosition[i]] = sgn * minLxc;
}
}
// Bit-to-check message update kernel. Grid-stride loop over all edges;
// each thread sums the Lcx messages of the other edges on the same bit
// node (ring walk via d_NextLcxIndex), adds the per-edge intrinsic LLR
// d_Lint, and writes the result into the paired Lxc slot (d_LxcPosition).
#ifdef DOUBLE
__global__ void UpdateLxc_GPU(double* d_Lcx, int* d_NextLcxIndex, int* d_LxcPosition, double* d_Lxc, double* d_Lint, int TotalEdge){
#else
__global__ void UpdateLxc_GPU(float* d_Lcx, int* d_NextLcxIndex, int* d_LxcPosition, float* d_Lxc, float* d_Lint, int TotalEdge){
#endif
int total_task = gridDim.x * blockDim.x;
int task_sn = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=task_sn; i<TotalEdge; i+=total_task){
#ifdef DOUBLE
double sumLcx=0;
#else
float sumLcx=0;
#endif
int Index = d_NextLcxIndex[i];
while(Index != i){
sumLcx = sumLcx + d_Lcx[Index];
Index = d_NextLcxIndex[Index];
}
d_Lxc[d_LxcPosition[i]] = d_Lint[i] + sumLcx;
}
}
// Hard-decision kernel. Grid-stride loop over the n_Total bit nodes: sum
// the node's incoming Lcx messages plus the intrinsic LLR (read from the
// node's first edge slot) and decode 0 when the posterior LLR >= 0,
// otherwise 1.
#ifdef DOUBLE
__global__ void Calculate_Posterior_GPU(int* d_LcxSize, double* d_Lcx, int* d_LcxBegin, double* d_Lint, int* d_DecodedData,int n_Total){
#else
__global__ void Calculate_Posterior_GPU(int* d_LcxSize, float* d_Lcx, int* d_LcxBegin, float* d_Lint, int* d_DecodedData,int n_Total){
#endif
int total_task = gridDim.x * blockDim.x;
int task_sn = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=task_sn; i<n_Total; i+=total_task){
#ifdef DOUBLE
double sumLcx=0;
#else
float sumLcx=0;
#endif
for(int j=0; j<d_LcxSize[i]; j++)
sumLcx = sumLcx + d_Lcx[d_LcxBegin[i]+j];
if(d_Lint[d_LcxBegin[i]] + sumLcx >= 0)
d_DecodedData[i] = 0;
else
d_DecodedData[i] = 1;
}
}
// Debug cross-check: prints every position where the CPU decode
// (DecodedData) disagrees with the GPU result (h_DecodedData). The
// commented-out sections compare intermediate Lxc/Lcx messages instead.
void CIRCUIT::Debug(){
/*for(int i=0; i<m; i++){
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++){
if(CheckNode[i]->Lxc[j] != CudaData->h_Lxc[CudaData->h_LxcBegin[i]+j])
cout<<CheckNode[i]->Lxc[j]<<" "<<CudaData->h_Lxc[CudaData->h_LxcBegin[i]+j]<<endl;
}
}
for(int i=0; i<n; i++){
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){
if(BitNode[i]->Lcx[j] != CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j])
cout<<BitNode[i]->Lcx[j]<<" "<<CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j]<<endl;
}
}*/
//cout<<"stop"<<endl;
//getchar();
for(int i=0; i<n_Total; i++){
if(DecodedData[i] != CudaData->h_DecodedData[i])
cout<<DecodedData[i]<<" "<<CudaData->h_DecodedData[i]<<endl;
}
}
| 367b7c28ca1b71e7e2afabbad37cc9c90691ff2c.cu | #include <iostream>
#include <math.h>
#include "circuit.h"
#include "device.h"
using namespace std;
//extern CIRCUIT Circuit;
// Allocate all host mirrors and device buffers for the decoder's flattened
// message arrays: per-bit-node Lcx messages, per-check-node Lxc messages,
// the ring-link Next*Index arrays, the cross-side *Position maps, and the
// decoded-output buffer. Sizes: n_Total bit nodes, k_Total check nodes,
// TotalEdge edges. Element type is double or float depending on DOUBLE.
// NOTE(review): cudaMalloc return codes are unchecked — a failed
// allocation only surfaces at a later API call. No matching free is
// visible in this file; confirm ownership/cleanup elsewhere.
void CIRCUIT::CUDA_MemoryAllocate(){
#ifdef PROFILE
Timer.TimerStart();
#endif
CudaData = new CUDA_DATA;
CudaData->h_DecodedData = new int[n_Total]; //redundant, for CPU debug
cudaMalloc((void**)&CudaData->d_DecodedData, n_Total * sizeof(int));
////// BitNode on host //////
CudaData->h_LcxSize = new int [n_Total]; //redundant?
CudaData->h_LcxBegin = new int [n_Total];
#ifdef DOUBLE
CudaData->h_Lcx = new double [TotalEdge];
CudaData->h_Lint = new double [TotalEdge]; //v2
#else
CudaData->h_Lcx = new float [TotalEdge];
CudaData->h_Lint = new float [TotalEdge];
#endif
CudaData->h_NextLcxIndex = new int [TotalEdge]; //v2
CudaData->h_LxcPosition = new int [TotalEdge];
////// CheckNode on host //////
CudaData->h_LxcSize = new int [k_Total]; //redundant?
#ifdef DOUBLE
CudaData->h_Lxc = new double [TotalEdge];
#else
CudaData->h_Lxc = new float [TotalEdge];
#endif
CudaData->h_LxcBegin = new int [k_Total];
CudaData->h_NextLxcIndex = new int [TotalEdge]; //v2
CudaData->h_LcxPosition = new int [TotalEdge];
////// BitNode on device //////
cudaMalloc((void**)&CudaData->d_LcxSize, n_Total * sizeof(int)); //redundant?
cudaMalloc((void**)&CudaData->d_LcxBegin, n_Total * sizeof(int)); //redundant?
#ifdef DOUBLE
cudaMalloc((void**)&CudaData->d_Lcx, TotalEdge * sizeof(double));
cudaMalloc((void**)&CudaData->d_Lint, TotalEdge * sizeof(double));
#else
cudaMalloc((void**)&CudaData->d_Lcx, TotalEdge * sizeof(float));
cudaMalloc((void**)&CudaData->d_Lint, TotalEdge * sizeof(float));
#endif
cudaMalloc((void**)&CudaData->d_NextLcxIndex, TotalEdge * sizeof(int));
cudaMalloc((void**)&CudaData->d_LxcPosition, TotalEdge * sizeof(int));
////// CheckNode on device //////
cudaMalloc((void**)&CudaData->d_LxcSize, k_Total * sizeof(int)); //redundant?
#ifdef DOUBLE
cudaMalloc((void**)&CudaData->d_Lxc, TotalEdge * sizeof(double));
#else
cudaMalloc((void**)&CudaData->d_Lxc, TotalEdge * sizeof(float));
#endif
cudaMalloc((void**)&CudaData->d_LxcBegin, k_Total * sizeof(int)); //redundant?
cudaMalloc((void**)&CudaData->d_NextLxcIndex, TotalEdge * sizeof(int));
cudaMalloc((void**)&CudaData->d_LcxPosition, TotalEdge * sizeof(int));
#ifdef PROFILE
Timer.TimerFinish(Timer.tCUDA_MemoryAllocate);
#endif
}
// Build the flattened graph-structure arrays from the BitNode/CheckNode
// objects: per-node begin offsets, circular NextLcxIndex/NextLxcIndex rings
// linking the edges of each node, and LcxPosition/LxcPosition maps that
// translate an edge's slot on one side of the graph into its slot on the
// other side. Must run before the message-update passes that walk these
// rings.
// NOTE(review): the ring-closing write h_Next*Index[Index-1] = Begin
// assumes every node has at least one edge; a degree-0 node would corrupt
// the previous node's ring — confirm the graph never contains such nodes.
void CIRCUIT::CUDA_CreateDataArray(){
#ifdef PROFILE
Timer.TimerStart();
#endif
int Begin=0;
int Index=0; //v2
///// BitNode /////
for(unsigned i=0; i<n_Total; i++){
CudaData->h_LcxSize[i] = BitNode[i]->Lcx.size();
CudaData->h_LcxBegin[i] = Begin;
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){ //v2
CudaData->h_NextLcxIndex[Index] = Index+1;
Index++;
}
// close the ring: the node's last edge points back to its first edge
CudaData->h_NextLcxIndex[Index-1] = Begin;
Begin=Begin+BitNode[i]->Lcx.size();
}
///// CheckNode //////
Begin=0;
Index=0; //v2
for(unsigned i=0; i<k_Total; i++){
CudaData->h_LxcSize[i] = CheckNode[i]->Lxc.size();
CudaData->h_LxcBegin[i] = Begin;
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++){
CudaData->h_LcxPosition[Begin+j] = CudaData->h_LcxBegin[CheckNode[i]->BitNode_H[j]->ID] + CheckNode[i]->Lcx_position[j];
CudaData->h_NextLxcIndex[Index] = Index + 1;
Index++;
}
CudaData->h_NextLxcIndex[Index-1] = Begin;
Begin=Begin+CheckNode[i]->Lxc.size();
}
///// BitNode /////
// reverse map: for each bit-node edge, where its message lands in Lxc
for(unsigned i=0; i<n_Total; i++){
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++)
CudaData->h_LxcPosition[CudaData->h_LcxBegin[i]+j] = CudaData->h_LxcBegin[BitNode[i]->CheckNode_H[j]->ID] + BitNode[i]->Lxc_position[j];
}
///// Debug /////
//Index = 0;
/*for(unsigned i=0; i<n; i++){
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){ //v2
cout<<CudaData->h_NextLcxIndex[Index]<<" ";
Index++;
}
cout<<endl;
}*/
/*for(unsigned i=0; i<m; i++){
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++){
cout<<CudaData->h_NextLxcIndex[Index]<<" ";
Index++;
}
cout<<endl;
}
cout<<"TotalEdge = "<<TotalEdge<<endl;
char a;
cin>>a;*/
#ifdef PROFILE
Timer.TimerFinish(Timer.tCUDA_CreateDataArray);
#endif
}
// Fill the per-decode message values into the packed host arrays:
// replicate each bit node's intrinsic LLR (Lint) across all of that
// node's edge slots, and copy every check node's Lxc messages. Uses the
// h_LcxBegin / h_LxcBegin offsets produced by CUDA_CreateDataArray.
void CIRCUIT::CUDA_CreateDataArray2(){
#ifdef PROFILE
Timer.TimerStart();
#endif
for(unsigned i=0; i<n_Total; i++){
//CudaData->h_Lint[i] = BitNode[i]->Lint;
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){
//CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j] = BitNode[i]->Lcx[j];
CudaData->h_Lint[CudaData->h_LcxBegin[i]+j] = BitNode[i]->Lint;
}
}
for(unsigned i=0; i<k_Total; i++)
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++)
CudaData->h_Lxc[CudaData->h_LxcBegin[i]+j] = CheckNode[i]->Lxc[j];
#ifdef PROFILE
Timer.TimerFinish(Timer.tCUDA_CreateDataArray2);
#endif
}
// CPU reference for the check-to-bit (min-sum) message update.
// For every edge, walk the ring of sibling edges on the same check node
// (via h_NextLxcIndex, skipping the edge itself), combining their Lxc
// values: the product of signs times the minimum magnitude becomes the
// outgoing Lcx message, written through the h_LcxPosition map.
void CIRCUIT::UpdateLcx_CPU(){
for(int edge = 0; edge < TotalEdge; edge++){
double signProd = 1;
double magMin = 1000;
for(int k = CudaData->h_NextLxcIndex[edge]; k != edge; k = CudaData->h_NextLxcIndex[k]){
double lxc = CudaData->h_Lxc[k];
if(!(lxc > 0))
signProd = -signProd;
magMin = min(magMin, fabs(lxc));
}
CudaData->h_Lcx[CudaData->h_LcxPosition[edge]] = signProd * magMin;
}
}
// CPU reference for the bit-to-check message update: for each edge, sum
// the Lcx messages of the other edges on the same bit node (ring walk via
// h_NextLcxIndex, skipping the edge itself), add the intrinsic LLR, and
// write the result to the paired Lxc slot given by h_LxcPosition.
void CIRCUIT::UpdateLxc_CPU(){
for(int i=0; i<TotalEdge; i++){
double sumLcx=0;
int Index = CudaData->h_NextLcxIndex[i];
while(Index != i){
sumLcx = sumLcx + CudaData->h_Lcx[Index];
Index = CudaData->h_NextLcxIndex[Index];
}
CudaData->h_Lxc[CudaData->h_LxcPosition[i]] = CudaData->h_Lint[i] + sumLcx;
}
}
// CPU reference for the hard decision: per bit node, sum all incoming Lcx
// messages plus the intrinsic LLR (replicated per edge, read from the
// node's first edge slot); decode bit 0 when the posterior LLR >= 0,
// otherwise 1.
void CIRCUIT::Calculate_Posterior_CPU(){
for(int i=0; i<n_Total; i++){
double sumLcx=0;
for(int j=0; j<CudaData->h_LcxSize[i]; j++)
sumLcx = sumLcx + CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j];
if(CudaData->h_Lint[CudaData->h_LcxBegin[i]] + sumLcx >= 0)
CudaData->h_DecodedData[i] = 0;
else
CudaData->h_DecodedData[i] = 1;
}
}
// One-time upload of the static graph structure to the device: node sizes,
// begin offsets, ring links (Next*Index) and cross-side position maps.
// All copies are blocking cudaMemcpy calls; return codes are not checked.
void CIRCUIT::MemoryCopy_H2D(){
#ifdef PROFILE
Timer.TimerStart();
#endif
////// BitNode on device //////
cudaMemcpy(CudaData->d_LcxSize, CudaData->h_LcxSize, n_Total * sizeof(int), cudaMemcpyHostToDevice);//redundant
cudaMemcpy(CudaData->d_LcxBegin, CudaData->h_LcxBegin, n_Total * sizeof(int), cudaMemcpyHostToDevice);//redundant
cudaMemcpy(CudaData->d_NextLcxIndex, CudaData->h_NextLcxIndex, TotalEdge * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(CudaData->d_LxcPosition, CudaData->h_LxcPosition, TotalEdge * sizeof(int), cudaMemcpyHostToDevice);
////// CheckNode on device //////
cudaMemcpy(CudaData->d_LxcSize, CudaData->h_LxcSize, k_Total * sizeof(int), cudaMemcpyHostToDevice);//redundant
cudaMemcpy(CudaData->d_LxcBegin, CudaData->h_LxcBegin, k_Total * sizeof(int), cudaMemcpyHostToDevice);//redundant
cudaMemcpy(CudaData->d_NextLxcIndex, CudaData->h_NextLxcIndex, TotalEdge * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(CudaData->d_LcxPosition, CudaData->h_LcxPosition, TotalEdge * sizeof(int), cudaMemcpyHostToDevice);
#ifdef PROFILE
Timer.TimerFinish(Timer.tMemoryCopy_H2D);
#endif
}
// Per-decode upload of the dynamic message values: the per-edge intrinsic
// LLRs (h_Lint) and the initial check-to-bit messages (h_Lxc). Element
// width follows the DOUBLE compile switch.
void CIRCUIT::MemoryCopy_H2D2(){
#ifdef PROFILE
Timer.TimerStart();
#endif
#ifdef DOUBLE
////// BitNode on device //////
//cudaMemcpy(CudaData->d_Lcx, CudaData->h_Lcx, TotalEdge * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(CudaData->d_Lint, CudaData->h_Lint, TotalEdge * sizeof(double), cudaMemcpyHostToDevice);
////// CheckNode on device //////
cudaMemcpy(CudaData->d_Lxc, CudaData->h_Lxc, TotalEdge * sizeof(double), cudaMemcpyHostToDevice);
#else
cudaMemcpy(CudaData->d_Lint, CudaData->h_Lint, TotalEdge * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(CudaData->d_Lxc, CudaData->h_Lxc, TotalEdge * sizeof(float), cudaMemcpyHostToDevice);
#endif
#ifdef PROFILE
Timer.TimerFinish(Timer.tMemoryCopy_H2D2);
#endif
}
// Download the hard-decision output (d_DecodedData) into the host-side
// DecodedData buffer; the blocking cudaMemcpy also acts as the
// synchronization point after the decode kernels.
void CIRCUIT::MemoryCopy_D2H(){
#ifdef PROFILE
Timer.TimerStart();
#endif
//cudaMemcpy(CudaData->h_Lcx, CudaData->d_Lcx, TotalEdge * sizeof(double), cudaMemcpyDeviceToHost);
//cudaMemcpy(CudaData->h_DecodedData, CudaData->d_DecodedData, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(DecodedData, CudaData->d_DecodedData, n_Total * sizeof(int), cudaMemcpyDeviceToHost);
#ifdef PROFILE
Timer.TimerFinish(Timer.tMemoryCopy_D2H);
#endif
}
// Check-to-bit (min-sum) message update kernel. Grid-stride loop over all
// edges; each thread walks the ring of sibling edges on the same check
// node (d_NextLxcIndex), accumulating the sign product and minimum
// magnitude of their Lxc values, then writes the result into the paired
// Lcx slot via d_LcxPosition. Element type follows the DOUBLE switch.
#ifdef DOUBLE
__global__ void UpdateLcx_GPU(double* d_Lxc, int* d_NextLxcIndex, int* d_LcxPosition, double* d_Lcx, int TotalEdge){
#else
__global__ void UpdateLcx_GPU(float* d_Lxc, int* d_NextLxcIndex, int* d_LcxPosition, float* d_Lcx, int TotalEdge){
#endif
int total_task = gridDim.x * blockDim.x;
int task_sn = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=task_sn; i<TotalEdge; i+=total_task){
#ifdef DOUBLE
double sgn=1;
double minLxc=1000;
#else
float sgn=1;
float minLxc=1000;
#endif
int Index = d_NextLxcIndex[i];
while(Index != i){
/*if(d_Lxc[Index] > 0)
sgn = sgn*1;
else
sgn = sgn*(-1);*/
if(d_Lxc[Index] < 0)
sgn = sgn*(-1);
minLxc = min(minLxc, fabs(d_Lxc[Index]));
Index = d_NextLxcIndex[Index];
}
d_Lcx[d_LcxPosition[i]] = sgn * minLxc;
}
}
// Bit-to-check message update kernel. Grid-stride loop over all edges;
// each thread sums the Lcx messages of the other edges on the same bit
// node (ring walk via d_NextLcxIndex), adds the per-edge intrinsic LLR
// d_Lint, and writes the result into the paired Lxc slot (d_LxcPosition).
#ifdef DOUBLE
__global__ void UpdateLxc_GPU(double* d_Lcx, int* d_NextLcxIndex, int* d_LxcPosition, double* d_Lxc, double* d_Lint, int TotalEdge){
#else
__global__ void UpdateLxc_GPU(float* d_Lcx, int* d_NextLcxIndex, int* d_LxcPosition, float* d_Lxc, float* d_Lint, int TotalEdge){
#endif
int total_task = gridDim.x * blockDim.x;
int task_sn = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=task_sn; i<TotalEdge; i+=total_task){
#ifdef DOUBLE
double sumLcx=0;
#else
float sumLcx=0;
#endif
int Index = d_NextLcxIndex[i];
while(Index != i){
sumLcx = sumLcx + d_Lcx[Index];
Index = d_NextLcxIndex[Index];
}
d_Lxc[d_LxcPosition[i]] = d_Lint[i] + sumLcx;
}
}
// Hard-decision kernel. Grid-stride loop over the n_Total bit nodes: sum
// the node's incoming Lcx messages plus the intrinsic LLR (read from the
// node's first edge slot) and decode 0 when the posterior LLR >= 0,
// otherwise 1.
#ifdef DOUBLE
__global__ void Calculate_Posterior_GPU(int* d_LcxSize, double* d_Lcx, int* d_LcxBegin, double* d_Lint, int* d_DecodedData,int n_Total){
#else
__global__ void Calculate_Posterior_GPU(int* d_LcxSize, float* d_Lcx, int* d_LcxBegin, float* d_Lint, int* d_DecodedData,int n_Total){
#endif
int total_task = gridDim.x * blockDim.x;
int task_sn = blockIdx.x*blockDim.x + threadIdx.x;
for(int i=task_sn; i<n_Total; i+=total_task){
#ifdef DOUBLE
double sumLcx=0;
#else
float sumLcx=0;
#endif
for(int j=0; j<d_LcxSize[i]; j++)
sumLcx = sumLcx + d_Lcx[d_LcxBegin[i]+j];
if(d_Lint[d_LcxBegin[i]] + sumLcx >= 0)
d_DecodedData[i] = 0;
else
d_DecodedData[i] = 1;
}
}
// Debug cross-check: prints every position where the CPU decode
// (DecodedData) disagrees with the GPU result (h_DecodedData). The
// commented-out sections compare intermediate Lxc/Lcx messages instead.
void CIRCUIT::Debug(){
/*for(int i=0; i<m; i++){
for(unsigned j=0; j<CheckNode[i]->Lxc.size(); j++){
if(CheckNode[i]->Lxc[j] != CudaData->h_Lxc[CudaData->h_LxcBegin[i]+j])
cout<<CheckNode[i]->Lxc[j]<<" "<<CudaData->h_Lxc[CudaData->h_LxcBegin[i]+j]<<endl;
}
}
for(int i=0; i<n; i++){
for(unsigned j=0; j<BitNode[i]->Lcx.size(); j++){
if(BitNode[i]->Lcx[j] != CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j])
cout<<BitNode[i]->Lcx[j]<<" "<<CudaData->h_Lcx[CudaData->h_LcxBegin[i]+j]<<endl;
}
}*/
//cout<<"stop"<<endl;
//getchar();
for(int i=0; i<n_Total; i++){
if(DecodedData[i] != CudaData->h_DecodedData[i])
cout<<DecodedData[i]<<" "<<CudaData->h_DecodedData[i]<<endl;
}
}
|
a7b9259722a0d43c62db774482748dd915ba1b4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include "model/decoder.h"
#include "model/encoder.h"
#include "proto/transformer_weight.h"
#include "tools/util.h"
#ifdef FP16_MODE
const lightseq::cuda::OperationType transformer_optytpe =
lightseq::cuda::OperationType::FP16;
#else
const lightseq::cuda::OperationType transformer_optytpe =
lightseq::cuda::OperationType::FP32;
#endif
namespace py = pybind11;
namespace lightseq {
namespace cuda {
// Self-contained inference wrapper around LightSeq's Transformer
// encoder/decoder pair, exposed to Python via pybind11.
//
// Construction loads the proto weights, allocates every device buffer for
// a fixed maximum batch size, and points encoder and decoder at one shared
// scratch buffer (their scratch is never live at the same time). infer()
// then runs a single encode + autoregressive decode per call.
//
// NOTE(review): there is no destructor — the stream, the cublas handle and
// all device allocations live until process exit. Fine for a long-lived
// singleton; a leak if Transformer objects are created repeatedly.
class Transformer {
 private:
  typedef lightseq::cuda::OperationTypeTraits<transformer_optytpe> optraits;
  lightseq::cuda::Encoder<transformer_optytpe> *encoder_;
  lightseq::cuda::Decoder<transformer_optytpe> *decoder_;

  optraits::DataType *d_encoder_output_;  // encoder activations
  int *d_input_;         // source token ids
  int *d_output_;        // decoded token ids (batch x beam x step)
  int *d_padding_mask_;
  int _max_batch_size;
  hipStream_t stream_;
  hipblasHandle_t hd_;
  lightseq::cuda::TransformerWeight<transformer_optytpe> tw_;
  std::set<std::string> available_sampling_methods = {"beam_search", "topk",
                                                      "topp", "topk_greedy"};

 public:
  // weight_path: LightSeq proto weight file; max_batch_size: upper bound on
  // the batch dimension accepted by infer(). Throws std::runtime_error on
  // any device / cublas / weight-loading failure.
  Transformer(const std::string weight_path, const int max_batch_size)
      : stream_(nullptr), hd_(nullptr), decoder_(nullptr) {
    /* ---step1. init environment--- */
    _max_batch_size = max_batch_size;
    hipError_t cuerr = hipSetDevice(0);
    if (cuerr != hipSuccess) {
      throw std::runtime_error(hipGetErrorString(cuerr));
    }
    cuerr = hipStreamCreate(&stream_);
    if (cuerr != hipSuccess) {
      throw std::runtime_error(hipGetErrorString(cuerr));
    }
    hipblasStatus_t cublaserr = hipblasCreate(&hd_);
    if (cublaserr != HIPBLAS_STATUS_SUCCESS) {
      throw std::runtime_error("Failed to create cublas handle");
    }
    cublaserr = hipblasSetStream(hd_, stream_);
    if (cublaserr != HIPBLAS_STATUS_SUCCESS) {
      throw std::runtime_error("Failed to set stream for cublas handle");
    }

    /* ---step2. load model weights into GPU memory--- */
    // saved in custom proto file
    std::string model_weights_path = weight_path;
    std::string res = tw_.initializing(model_weights_path);
    if (!res.empty()) {
      throw std::runtime_error(res);
    }
    // pure sampling methods decode a single hypothesis per sentence
    if (tw_._sampling_method == "topk" || tw_._sampling_method == "topp") {
      tw_._beam_size = 1;
    }
    tw_.print_model_config();

    /*
      step3. instantiate encoder and decoder, init the gpu memory buffer.
      using thrust vector to avoid manage gpu memory by hand
    */
    // register device memory for inputs and outputs
    lightseq::cuda::CHECK_GPU_ERROR(
        hipMalloc(&d_input_, _max_batch_size * tw_._max_step * sizeof(int)));
    lightseq::cuda::CHECK_GPU_ERROR(hipMalloc(
        &d_padding_mask_, _max_batch_size * tw_._max_step * sizeof(int)));
    lightseq::cuda::CHECK_GPU_ERROR(hipMalloc(
        &d_encoder_output_, _max_batch_size * tw_._max_step * tw_._hidden_size *
                                sizeof(optraits::DataType)));
    lightseq::cuda::CHECK_GPU_ERROR(hipMalloc(
        &d_output_,
        _max_batch_size * tw_._beam_size * tw_._max_step * sizeof(int)));

    encoder_ = new lightseq::cuda::Encoder<transformer_optytpe>(
        max_batch_size, d_input_, d_padding_mask_, d_encoder_output_, tw_,
        stream_, hd_);
    res = encoder_->check();
    if (!res.empty()) {
      throw std::runtime_error(res);
    }
    decoder_ = new lightseq::cuda::Decoder<transformer_optytpe>(
        _max_batch_size, d_padding_mask_, d_encoder_output_, d_output_, tw_,
        stream_, hd_, true);
    res = decoder_->check();
    if (!res.empty()) {
      throw std::runtime_error(res);
    }

    // encoder and decoder share one scratch buffer sized for the larger of
    // the two, halving peak GPU memory usage
    long buf_bytesize = ::max(encoder_->compute_buffer_bytesize(),
                              decoder_->compute_buffer_bytesize());
    std::cout << "transformer buf_bytesize: " << buf_bytesize << std::endl;
    void *d_buf_;
    lightseq::cuda::CHECK_GPU_ERROR(
        hipMalloc((void **)&d_buf_, (size_t)buf_bytesize));
    encoder_->init_buffer(d_buf_);
    decoder_->init_buffer(d_buf_);
    cuerr = hipStreamSynchronize(stream_);
    if (cuerr != hipSuccess) {
      std::cout << "Failed to init GPU for transformer" << std::endl;
      // fix: this runtime_error was previously constructed but never
      // thrown, letting construction "succeed" on a broken device state
      throw std::runtime_error(std::string(hipGetErrorString(cuerr)));
    }
  }

  // Run one translation pass.
  //   input_seq: int32 array of shape (batch, seq_len).
  //   multiple_output: return all beam hypotheses instead of only the best.
  //   sampling_method: overrides the weight-file setting when it is one of
  //     available_sampling_methods.
  // Returns (tokens, scores): tokens is (batch, k, steps) int32 and scores
  // is (batch, k) float, with k = beam_size when multiple_output else 1.
  // NOTE(review): the beam_size / length_penalty / topp / topk /
  // diverse_lambda arguments are accepted but never applied — beam_size is
  // overwritten from the weights below; confirm intended behavior.
  std::tuple<py::array_t<int>, py::array_t<float>> infer(
      py::array_t<int, py::array::c_style | py::array::forcecast> input_seq,
      bool multiple_output = false, std::string sampling_method = "",
      int beam_size = -1, float length_penalty = -1, float topp = -1,
      float topk = -1, float diverse_lambda = -1) {
    if (available_sampling_methods.find(sampling_method) !=
        available_sampling_methods.end()) {
      tw_._sampling_method = sampling_method;
    }
    // topk/topp sampling emit one sequence; topk_greedy always emits topk
    if (sampling_method == "topk" || sampling_method == "topp") {
      multiple_output = false;
    }
    if (sampling_method == "topk_greedy") {
      multiple_output = true;
    }
    decoder_->_output_topk = multiple_output;

    auto input_seq_out = input_seq.mutable_unchecked<2>();
    const int *input_seq_data = input_seq_out.data(0, 0);
    int batch_size = input_seq_out.shape(0);
    int batch_seq_len = input_seq_out.shape(1);
    if (batch_size > _max_batch_size) {
      throw std::runtime_error(
          "batch size of input greater than max_batch_size");
    }
    if (batch_seq_len > tw_._max_step) {
      throw std::runtime_error("seq len of input greater than max_step");
    }

    lightseq::cuda::CHECK_GPU_ERROR(hipMemcpyAsync(
        d_input_, input_seq_data, sizeof(int) * input_seq_out.size(),
        hipMemcpyHostToDevice, stream_));

    encoder_->run_one_infer(batch_size, batch_seq_len);
    decoder_->run_one_infer(batch_size, batch_seq_len);

    // _cur_step is the index of the last generated step, hence +1 tokens
    int tokens_size = decoder_->_cur_step + 1;
    beam_size = tw_._beam_size;
    int output_k = multiple_output ? beam_size : 1;
    auto tokens = py::array_t<int>({batch_size, output_k, tokens_size});
    int *tokens_data = tokens.mutable_data(0, 0);
    // blocking copies below also synchronize with the decode stream
    lightseq::cuda::CHECK_GPU_ERROR(hipMemcpy(tokens_data, d_output_,
                                              sizeof(int) * tokens.size(),
                                              hipMemcpyDeviceToHost));
    auto scores = py::array_t<float>({batch_size, output_k});
    float *scores_data = scores.mutable_data(0, 0);
    lightseq::cuda::CHECK_GPU_ERROR(
        hipMemcpy(scores_data, decoder_->_p_d_alive_seq_score,
                  sizeof(float) * scores.size(), hipMemcpyDeviceToHost));
    return std::make_tuple(tokens, scores);
  }
};
} // namespace cuda
} // namespace lightseq
| a7b9259722a0d43c62db774482748dd915ba1b4f.cu | #include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include "model/decoder.h"
#include "model/encoder.h"
#include "proto/transformer_weight.h"
#include "tools/util.h"
#ifdef FP16_MODE
const lightseq::cuda::OperationType transformer_optytpe =
lightseq::cuda::OperationType::FP16;
#else
const lightseq::cuda::OperationType transformer_optytpe =
lightseq::cuda::OperationType::FP32;
#endif
namespace py = pybind11;
namespace lightseq {
namespace cuda {
// pybind11-facing inference wrapper for a LightSeq Transformer model.
// Owns the CUDA stream, the cuBLAS handle, the model weights and the
// encoder/decoder pair, plus device buffers pre-sized for _max_batch_size.
class Transformer {
 private:
  typedef lightseq::cuda::OperationTypeTraits<transformer_optytpe> optraits;
  lightseq::cuda::Encoder<transformer_optytpe> *encoder_;
  lightseq::cuda::Decoder<transformer_optytpe> *decoder_;
  optraits::DataType *d_encoder_output_;  // device: encoder hidden states
  int *d_input_;                          // device: input token ids
  int *d_output_;                         // device: generated token ids
  int *d_padding_mask_;                   // device: per-token padding mask
  int _max_batch_size;
  cudaStream_t stream_;
  cublasHandle_t hd_;
  lightseq::cuda::TransformerWeight<transformer_optytpe> tw_;
  std::set<std::string> available_sampling_methods = {"beam_search", "topk",
                                                      "topp", "topk_greedy"};
 public:
  // Loads weights from `weight_path`, initializes CUDA/cuBLAS state and
  // pre-allocates every device buffer for batches up to `max_batch_size`.
  // Throws std::runtime_error on any initialization failure.
  Transformer(const std::string weight_path, const int max_batch_size)
      : stream_(nullptr), hd_(nullptr), decoder_(nullptr) {
    /* ---step1. init environment--- */
    _max_batch_size = max_batch_size;
    cudaError_t cuerr = cudaSetDevice(0);
    if (cuerr != cudaSuccess) {
      throw std::runtime_error(cudaGetErrorString(cuerr));
    }
    cuerr = cudaStreamCreate(&stream_);
    if (cuerr != cudaSuccess) {
      throw std::runtime_error(cudaGetErrorString(cuerr));
    }
    cublasStatus_t cublaserr = cublasCreate(&hd_);
    if (cublaserr != CUBLAS_STATUS_SUCCESS) {
      // NOTE(review): runtime string contains typo "creat"; preserved as-is
      throw std::runtime_error("Failed to creat cublas handle ");
    }
    cublaserr = cublasSetStream(hd_, stream_);
    if (cublaserr != CUBLAS_STATUS_SUCCESS) {
      throw std::runtime_error("Failed to set stream for cublas handle");
    }
    /* ---step2. load model weights into GPU memory--- */
    // saved in custom proto file
    std::string model_weights_path = weight_path;
    std::string res = tw_.initializing(model_weights_path);
    if (!res.empty()) {
      throw std::runtime_error(res);
    }
    // topk/topp sampling emits a single hypothesis, so force beam size 1
    if (tw_._sampling_method == "topk" || tw_._sampling_method == "topp") {
      tw_._beam_size = 1;
    }
    tw_.print_model_config();
    /*
      step3. instantiate encoder and decoder, init the gpu memory buffer.
        using thrust vector to avoid manage gpu memory by hand
    */
    // register device memory for inputs and outputs
    lightseq::cuda::CHECK_GPU_ERROR(
        cudaMalloc(&d_input_, _max_batch_size * tw_._max_step * sizeof(int)));
    lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc(
        &d_padding_mask_, _max_batch_size * tw_._max_step * sizeof(int)));
    lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc(
        &d_encoder_output_, _max_batch_size * tw_._max_step * tw_._hidden_size *
                                sizeof(optraits::DataType)));
    // output holds up to beam_size hypotheses of max_step tokens per sample
    lightseq::cuda::CHECK_GPU_ERROR(cudaMalloc(
        &d_output_,
        _max_batch_size * tw_._beam_size * tw_._max_step * sizeof(int)));
    encoder_ = new lightseq::cuda::Encoder<transformer_optytpe>(
        max_batch_size, d_input_, d_padding_mask_, d_encoder_output_, tw_,
        stream_, hd_);
    res = encoder_->check();
    if (!res.empty()) {
      throw std::runtime_error(res);
    }
    decoder_ = new lightseq::cuda::Decoder<transformer_optytpe>(
        _max_batch_size, d_padding_mask_, d_encoder_output_, d_output_, tw_,
        stream_, hd_, true);
    res = decoder_->check();
    if (!res.empty()) {
      throw std::runtime_error(res);
    }
    long buf_bytesize = std::max(encoder_->compute_buffer_bytesize(),
                                 decoder_->compute_buffer_bytesize());
    std::cout << "transformer buf_bytesize: " << buf_bytesize << std::endl;
    void *d_buf_;
    // encoder and decoder use the same buffer to save gpu memory useage
    lightseq::cuda::CHECK_GPU_ERROR(
        cudaMalloc((void **)&d_buf_, (size_t)buf_bytesize));
    encoder_->init_buffer(d_buf_);
    decoder_->init_buffer(d_buf_);
    cuerr = cudaStreamSynchronize(stream_);
    if (cuerr != cudaSuccess) {
      std::cout << "Failed to init GPU for transformer" << std::endl;
      // NOTE(review): this constructs a runtime_error and discards it —
      // a `throw` is almost certainly missing here.
      std::runtime_error(std::string(cudaGetErrorString(cuerr)));
    }
  }
  // Runs encoder+decoder over a (batch, seq_len) int token array and returns
  // (tokens, scores): tokens shaped (batch, output_k, decoded_len), scores
  // shaped (batch, output_k); output_k is the model beam size when
  // multiple_output is true, otherwise 1.
  // NOTE(review): beam_size is overwritten from the model config below, and
  // length_penalty/topp/topk/diverse_lambda are accepted but unused here.
  std::tuple<py::array_t<int>, py::array_t<float>> infer(
      py::array_t<int, py::array::c_style | py::array::forcecast> input_seq,
      bool multiple_output = false, std::string sampling_method = "",
      int beam_size = -1, float length_penalty = -1, float topp = -1,
      float topk = -1, float diverse_lambda = -1) {
    // allow the caller to override the sampling method per call
    if (available_sampling_methods.find(sampling_method) !=
        available_sampling_methods.end()) {
      tw_._sampling_method = sampling_method;
    }
    if (sampling_method == "topk" || sampling_method == "topp") {
      multiple_output = false;
    }
    if (sampling_method == "topk_greedy") {
      multiple_output = true;
    }
    decoder_->_output_topk = multiple_output;
    auto input_seq_out = input_seq.mutable_unchecked<2>();
    const int *input_seq_data = input_seq_out.data(0, 0);
    int batch_size = input_seq_out.shape(0);
    int batch_seq_len = input_seq_out.shape(1);
    if (batch_size > _max_batch_size) {
      throw std::runtime_error(
          "batch size of input greater than max_batch_size");
    }
    if (batch_seq_len > tw_._max_step) {
      throw std::runtime_error("seq len of input greater than max_step");
    }
    lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpyAsync(
        d_input_, input_seq_data, sizeof(int) * input_seq_out.size(),
        cudaMemcpyHostToDevice, stream_));
    encoder_->run_one_infer(batch_size, batch_seq_len);
    decoder_->run_one_infer(batch_size, batch_seq_len);
    int tokens_size = decoder_->_cur_step + 1;
    beam_size = tw_._beam_size;
    int output_k = multiple_output ? beam_size : 1;
    auto tokens = py::array_t<int>({batch_size, output_k, tokens_size});
    int *tokens_data = tokens.mutable_data(0, 0);
    // blocking copies below also synchronize with the async work above
    lightseq::cuda::CHECK_GPU_ERROR(cudaMemcpy(tokens_data, d_output_,
                                               sizeof(int) * tokens.size(),
                                               cudaMemcpyDeviceToHost));
    auto scores = py::array_t<float>({batch_size, output_k});
    float *scores_data = scores.mutable_data(0, 0);
    lightseq::cuda::CHECK_GPU_ERROR(
        cudaMemcpy(scores_data, decoder_->_p_d_alive_seq_score,
                   sizeof(float) * scores.size(), cudaMemcpyDeviceToHost));
    return std::make_tuple(tokens, scores);
  }
};
} // namespace cuda
} // namespace lightseq
|
f921477f89c5cfaf1e76f08ee589cab5effbe330.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <hip/hip_runtime_api.h>
#include "ScatterBEV_kernels.h"
// Scatters per-pillar feature vectors into the dense BEV feature map:
// each block stages up to PILLARS_PER_BLOCK pillar feature vectors in shared
// memory, then each thread writes its pillar's FEATURE_SIZE values to the
// (y, x) cell taken from that pillar's coordinates.
// NOTE(review): the staging loop indexes features by threadIdx.x, which
// assumes blockDim.x == PILLARS_PER_BLOCK == FEATURE_SIZE; otherwise part of
// pillarSM is never written — confirm against the header constants.
__global__ void scatterBEV_kernel(const float *pillar_features_data,
        const float *coords_data, const unsigned int *params_data,
        unsigned int featureX, unsigned int featureY,
        float *spatial_feature_data)
{
  int pillar_idx = blockIdx.x * PILLARS_PER_BLOCK + threadIdx.x;
  int valid_pillars_inBlock = PILLARS_PER_BLOCK;
  const int num_pillars = params_data[4];  // actual pillar count (device-side)
  int valid_blocks = (num_pillars+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK;
  // whole block exits together, so this return is safe before __syncthreads
  if(blockIdx.x >= valid_blocks) return;
  if(blockIdx.x == (valid_blocks-1)) {
    // last block may hold a partial set of pillars
    valid_pillars_inBlock = num_pillars % PILLARS_PER_BLOCK;
  }
  valid_pillars_inBlock = (valid_pillars_inBlock==0) ? PILLARS_PER_BLOCK : valid_pillars_inBlock;
  __shared__ float pillarSM[PILLARS_PER_BLOCK][FEATURE_SIZE]; //pillar*64
  // cooperative load: thread t loads feature t of every pillar in the block
  for (int i = 0; i < valid_pillars_inBlock; i++)
  {
    pillarSM[i][threadIdx.x] = pillar_features_data[ (blockIdx.x * PILLARS_PER_BLOCK +i)*FEATURE_SIZE + threadIdx.x];
  }
  __syncthreads();
  if(pillar_idx >= num_pillars) return;
  // coords layout per pillar: .w holds the x column, .z holds the y row
  float4 coord = ((const float4 *)coords_data)[pillar_idx];
  int x = (int)coord.w;
  int y = (int)coord.z;
  // write one feature plane at a time: output is (FEATURE_SIZE, featureY, featureX)
  for (int i = 0; i < FEATURE_SIZE; i++)
  {
    spatial_feature_data[i*featureY*featureX + y*featureX + x] = pillarSM[threadIdx.x][i];
  }
}
// Launches scatterBEV_kernel on `stream`; exits the process on launch error.
// NOTE(review): the grid is sized by the full BEV area (featureX*featureY)
// rather than by a max pillar count; the kernel early-exits blocks beyond
// the valid pillar range, so this is safe but oversized.
void scatterBEV_kernel_launcher(const float *pillar_features_data,
                          const float *coords_data,
                          const unsigned int *params_data,
                          unsigned int featureX, unsigned int featureY,
                          float *spatial_feature_data,
                          hipStream_t stream)
{
  //std::cout<<"######## C++ scatterBEV_kernel_launcher"<<std::endl;
  dim3 blocks( (featureX*featureY+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK);
  dim3 threads(PILLARS_PER_BLOCK);
  hipLaunchKernelGGL(( scatterBEV_kernel), dim3(blocks), dim3(threads), 0, stream,
      pillar_features_data, coords_data, params_data, featureX, featureY, spatial_feature_data);
  // surfaces launch-configuration errors only; execution errors appear later
  auto err = hipGetLastError();
  if (hipSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
}
| f921477f89c5cfaf1e76f08ee589cab5effbe330.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cuda_runtime_api.h>
#include "ScatterBEV_kernels.h"
// Scatters per-pillar feature vectors into the dense BEV feature map:
// each block stages up to PILLARS_PER_BLOCK pillar feature vectors in shared
// memory, then each thread writes its pillar's FEATURE_SIZE values to the
// (y, x) cell taken from that pillar's coordinates.
// NOTE(review): the staging loop indexes features by threadIdx.x, which
// assumes blockDim.x == PILLARS_PER_BLOCK == FEATURE_SIZE; otherwise part of
// pillarSM is never written — confirm against the header constants.
__global__ void scatterBEV_kernel(const float *pillar_features_data,
        const float *coords_data, const unsigned int *params_data,
        unsigned int featureX, unsigned int featureY,
        float *spatial_feature_data)
{
  int pillar_idx = blockIdx.x * PILLARS_PER_BLOCK + threadIdx.x;
  int valid_pillars_inBlock = PILLARS_PER_BLOCK;
  const int num_pillars = params_data[4];  // actual pillar count (device-side)
  int valid_blocks = (num_pillars+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK;
  // whole block exits together, so this return is safe before __syncthreads
  if(blockIdx.x >= valid_blocks) return;
  if(blockIdx.x == (valid_blocks-1)) {
    // last block may hold a partial set of pillars
    valid_pillars_inBlock = num_pillars % PILLARS_PER_BLOCK;
  }
  valid_pillars_inBlock = (valid_pillars_inBlock==0) ? PILLARS_PER_BLOCK : valid_pillars_inBlock;
  __shared__ float pillarSM[PILLARS_PER_BLOCK][FEATURE_SIZE]; //pillar*64
  // cooperative load: thread t loads feature t of every pillar in the block
  for (int i = 0; i < valid_pillars_inBlock; i++)
  {
    pillarSM[i][threadIdx.x] = pillar_features_data[ (blockIdx.x * PILLARS_PER_BLOCK +i)*FEATURE_SIZE + threadIdx.x];
  }
  __syncthreads();
  if(pillar_idx >= num_pillars) return;
  // coords layout per pillar: .w holds the x column, .z holds the y row
  float4 coord = ((const float4 *)coords_data)[pillar_idx];
  int x = (int)coord.w;
  int y = (int)coord.z;
  // write one feature plane at a time: output is (FEATURE_SIZE, featureY, featureX)
  for (int i = 0; i < FEATURE_SIZE; i++)
  {
    spatial_feature_data[i*featureY*featureX + y*featureX + x] = pillarSM[threadIdx.x][i];
  }
}
// Launches scatterBEV_kernel on `stream` and aborts the process if the
// launch itself fails (execution errors surface at a later sync point).
// The grid is sized by the full BEV area; blocks past the valid pillar
// range exit immediately inside the kernel.
void scatterBEV_kernel_launcher(const float *pillar_features_data,
                          const float *coords_data,
                          const unsigned int *params_data,
                          unsigned int featureX, unsigned int featureY,
                          float *spatial_feature_data,
                          cudaStream_t stream)
{
  unsigned int numBlocks =
      (featureX * featureY + PILLARS_PER_BLOCK - 1) / PILLARS_PER_BLOCK;
  scatterBEV_kernel<<<dim3(numBlocks), dim3(PILLARS_PER_BLOCK), 0, stream>>>(
      pillar_features_data, coords_data, params_data,
      featureX, featureY, spatial_feature_data);
  cudaError_t launchStatus = cudaGetLastError();
  if (launchStatus != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(launchStatus));
    exit(-1);
  }
}
|
2f2a0935181ca144d951cba11e8d5aeecf8a7df8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file generateBC2.cu
* \author Anush Krishnan (anush@bu.edu)
* \brief Implementation of the kernels to generate elements of the right hand-side
* of the Poisson solver.
*/
#include "generateBC2.h"
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
 * \brief Computes inhomogeneous terms of the discrete divergence operator
 *        from the bottom and top boundaries at the v-velocity locations.
 *
 * \param bc2 array that contains boundary conditions
 * \param yminus bottom-boundary velocities
 * \param yplus top-boundary velocities
 * \param dx cell-widths in the x-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 */
__global__
void fillBC2_v(real *bc2, real *yminus, real *yplus, real *dx, int nx, int ny)
{
	// one thread per column i; each thread updates two distinct entries of
	// bc2 (bottom row and top row), so no atomics are needed
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	if(i>=nx)
		return;
	// the +nx-1 offset skips the u-velocity entries stored first on each
	// boundary array — TODO confirm against the boundary-array layout
	bc2[i] -= yminus[i+nx-1]*dx[i];
	bc2[(ny-1)*nx + i] += yplus[i+nx-1]*dx[i];
}
/**
 * \brief Computes inhomogeneous terms of the discrete divergence operator
 *        from the left and right boundaries at the u-velocity locations.
 *
 * \param bc2 array that contains boundary conditions
 * \param xminus left-boundary velocities
 * \param xplus right-boundary velocities
 * \param dy cell-widths in the x-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 */
__global__
void fillBC2_u(real *bc2, real *xminus, real *xplus, real *dy, int nx, int ny)
{
	// one thread per row j; updates the first and last entries of row j
	int j = threadIdx.x + blockIdx.x*blockDim.x;
	if(j>=ny)
		return;
	bc2[j*nx] -= xminus[j]*dy[j];
	bc2[j*nx+nx-1] += xplus[j]*dy[j];
}
/**
 * \brief Computes inhomogeneous terms of the discrete divergence operator
 *        from the no-slip constraint at the body-point locations.
 *
 * \param bc2 array that contains boundary conditions
 * \param uB x-component of the body-velocity
 * \param vB y-component of the body-velcoity
 * \param totalPoints number of body-points (all bodies included)
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 */
__global__
void fillBC2_uvB(real *bc2, real *uB, real *vB, int totalPoints, int nx, int ny)
{
	// body-point entries are appended after the nx*ny cell entries
	int k = threadIdx.x + blockIdx.x*blockDim.x;
	if(k>=totalPoints)
		return;
	bc2[nx*ny + k] = uB[k];
	bc2[nx*ny + k + totalPoints] = vB[k];
}
} // end of namespace kernels
| 2f2a0935181ca144d951cba11e8d5aeecf8a7df8.cu | /***************************************************************************//**
* \file generateBC2.cu
* \author Anush Krishnan (anush@bu.edu)
* \brief Implementation of the kernels to generate elements of the right hand-side
* of the Poisson solver.
*/
#include "generateBC2.h"
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
 * \brief Adds the inhomogeneous bottom/top boundary terms of the discrete
 *        divergence operator at the v-velocity locations.
 *
 * \param bc2 array that contains boundary conditions
 * \param yminus bottom-boundary velocities
 * \param yplus top-boundary velocities
 * \param dx cell-widths in the x-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 */
__global__
void fillBC2_v(real *bc2, real *yminus, real *yplus, real *dx, int nx, int ny)
{
	int col = threadIdx.x + blockIdx.x*blockDim.x;
	if (col < nx)
	{
		// each thread touches two distinct entries, so no atomics needed
		bc2[col]            -= yminus[col + nx - 1] * dx[col];
		bc2[(ny-1)*nx + col] += yplus[col + nx - 1] * dx[col];
	}
}
/**
 * \brief Adds the inhomogeneous left/right boundary terms of the discrete
 *        divergence operator at the u-velocity locations.
 *
 * \param bc2 array that contains boundary conditions
 * \param xminus left-boundary velocities
 * \param xplus right-boundary velocities
 * \param dy cell-widths in the x-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 */
__global__
void fillBC2_u(real *bc2, real *xminus, real *xplus, real *dy, int nx, int ny)
{
	int row = threadIdx.x + blockIdx.x*blockDim.x;
	if (row < ny)
	{
		// first and last entry of row `row`
		bc2[row*nx]          -= xminus[row] * dy[row];
		bc2[row*nx + nx - 1] += xplus[row]  * dy[row];
	}
}
/**
 * \brief Writes the no-slip body velocities into the entries of bc2 that
 *        follow the nx*ny cell entries (one per body point, u then v).
 *
 * \param bc2 array that contains boundary conditions
 * \param uB x-component of the body-velocity
 * \param vB y-component of the body-velocity
 * \param totalPoints number of body-points (all bodies included)
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 */
__global__
void fillBC2_uvB(real *bc2, real *uB, real *vB, int totalPoints, int nx, int ny)
{
	int pt = threadIdx.x + blockIdx.x*blockDim.x;
	if (pt < totalPoints)
	{
		bc2[nx*ny + pt]               = uB[pt];
		bc2[nx*ny + pt + totalPoints] = vB[pt];
	}
}
} // end of namespace kernels
|
59b90cf10ca1a8b058de6f4c3987cb88d3b13d5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.hpp"
#include <stdio.h>
#include "histogram_naive.hpp"
// One thread per input element: each thread bins its element and increments
// the shared global histogram with an atomic. Out-of-range values are
// clamped into the first/last bin. Contention on popular bins serializes
// the atomics; NaN inputs yield an undefined bin index (float->int cast of
// NaN) — NOTE(review): confirm inputs are NaN-free.
__global__
void histogram_thread_element_kernel(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist)
{
    // Approaches:
    // 1. One thread per element, writing into a single histogram. I can figure this out now.
    // 2. One thread per bin, reading the entire array. I can figure this out now.
    // 3. Something else
    // histogram_thread_element_kernel: each thread handles one element and writes into one histogram.
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= arrayLength)
    {
        return;
    }
    float binSize = (lastEdge-firstEdge)/numBins;
    int iBin = (x[idx]-firstEdge)/binSize;
    // clamp under/overflow into the edge bins
    if (iBin < 0)
    {
        iBin = 0;
    }
    if (iBin >= numBins)
    {
        iBin = numBins - 1;
    }
    atomicAdd(outHist + iBin, 1);
}
// Host wrapper for the one-thread-per-element histogram kernel: copies x to
// the device, zeroes the device histogram, launches the kernel and copies
// the numBins-bin result back into outHist.
//
// outHist must hold numBins ints; gridSize*blockSize should be >=
// arrayLength (excess threads exit early in the kernel).
void histogram_thread_element(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist, int gridSize, int blockSize)
{
    float* d_x;
    int* d_histogram;
    checkCudaErrors(hipMalloc(&d_x, arrayLength*sizeof(float)));
    checkCudaErrors(hipMalloc(&d_histogram, numBins*sizeof(int)));
    checkCudaErrors(hipMemcpy(d_x, x, arrayLength*sizeof(float), hipMemcpyHostToDevice));
    checkCudaErrors(hipMemset(d_histogram, 0, numBins*sizeof(int)));
    hipLaunchKernelGGL(( histogram_thread_element_kernel), dim3(gridSize), dim3(blockSize), 0, 0, arrayLength, d_x, numBins, firstEdge, lastEdge, d_histogram);
    checkCudaErrors(hipGetLastError());  // surface launch-configuration errors
    // bug fix: the histogram is an int array; the original copied
    // numBins*sizeof(float) bytes, which was type-incorrect (it only worked
    // because sizeof(float) == sizeof(int) on this platform).
    checkCudaErrors(hipMemcpy(outHist, d_histogram, numBins*sizeof(int), hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_x));
    checkCudaErrors(hipFree(d_histogram));
}
// One thread per bin: each thread scans the ENTIRE input array and counts
// the elements that fall into its bin, then writes the count directly (no
// atomics needed). Total work is O(numBins * arrayLength) with fully
// redundant global reads across threads — simple but slow for many bins.
__global__
void histogram_thread_bin_kernel(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist)
{
    // Approaches:
    // 1. One thread per element, writing into a single histogram. I can figure this out now.
    // 2. One thread per bin, reading the entire array. I can figure this out now.
    // 3. Something else
    // histogram_thread_bin_kernel: each thread handles one bin
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= numBins)
    {
        return;
    }
    float binSize = (lastEdge-firstEdge)/numBins;
    int myBinCount = 0;
    for (int ii = 0; ii < arrayLength; ii++)
    {
        int iBin = (x[ii]-firstEdge)/binSize;
        // clamp under/overflow into the edge bins
        if (iBin < 0)
        {
            iBin = 0;
        }
        if (iBin >= numBins)
        {
            iBin = numBins - 1;
        }
        if (iBin == idx)
        {
            myBinCount++;
        }
    }
    outHist[idx] = myBinCount;
}
// Host wrapper for the one-thread-per-bin histogram kernel: copies x to the
// device, zeroes the device histogram, launches the kernel and copies the
// numBins-bin result back into outHist.
//
// outHist must hold numBins ints; gridSize*blockSize should be >= numBins
// (excess threads exit early in the kernel).
void histogram_thread_bin(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist, int gridSize, int blockSize)
{
    float* d_x;
    int* d_histogram;
    checkCudaErrors(hipMalloc(&d_x, arrayLength*sizeof(float)));
    checkCudaErrors(hipMalloc(&d_histogram, numBins*sizeof(int)));
    checkCudaErrors(hipMemcpy(d_x, x, arrayLength*sizeof(float), hipMemcpyHostToDevice));
    checkCudaErrors(hipMemset(d_histogram, 0, numBins*sizeof(int)));
    hipLaunchKernelGGL(( histogram_thread_bin_kernel), dim3(gridSize), dim3(blockSize), 0, 0, arrayLength, d_x, numBins, firstEdge, lastEdge, d_histogram);
    checkCudaErrors(hipGetLastError());  // surface launch-configuration errors
    // bug fix: copy numBins*sizeof(int) bytes — the histogram is an int
    // array; the original used sizeof(float), which was type-incorrect.
    checkCudaErrors(hipMemcpy(outHist, d_histogram, numBins*sizeof(int), hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_x));
    checkCudaErrors(hipFree(d_histogram));
}
| 59b90cf10ca1a8b058de6f4c3987cb88d3b13d5b.cu | #include "utils.hpp"
#include <stdio.h>
#include "histogram_naive.hpp"
// One thread per input element: each thread bins its element and increments
// the shared global histogram with an atomic. Out-of-range values are
// clamped into the first/last bin. Contention on popular bins serializes
// the atomics; NaN inputs yield an undefined bin index (float->int cast of
// NaN) — NOTE(review): confirm inputs are NaN-free.
__global__
void histogram_thread_element_kernel(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist)
{
    // Approaches:
    // 1. One thread per element, writing into a single histogram. I can figure this out now.
    // 2. One thread per bin, reading the entire array. I can figure this out now.
    // 3. Something else
    // histogram_thread_element_kernel: each thread handles one element and writes into one histogram.
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= arrayLength)
    {
        return;
    }
    float binSize = (lastEdge-firstEdge)/numBins;
    int iBin = (x[idx]-firstEdge)/binSize;
    // clamp under/overflow into the edge bins
    if (iBin < 0)
    {
        iBin = 0;
    }
    if (iBin >= numBins)
    {
        iBin = numBins - 1;
    }
    atomicAdd(outHist + iBin, 1);
}
// Host wrapper for the one-thread-per-element histogram kernel: copies x to
// the device, zeroes the device histogram, launches the kernel and copies
// the numBins-bin result back into outHist.
//
// outHist must hold numBins ints; gridSize*blockSize should be >=
// arrayLength (excess threads exit early in the kernel).
void histogram_thread_element(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist, int gridSize, int blockSize)
{
    float* d_x;
    int* d_histogram;
    checkCudaErrors(cudaMalloc(&d_x, arrayLength*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_histogram, numBins*sizeof(int)));
    checkCudaErrors(cudaMemcpy(d_x, x, arrayLength*sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemset(d_histogram, 0, numBins*sizeof(int)));
    histogram_thread_element_kernel<<<gridSize, blockSize>>>(arrayLength, d_x, numBins, firstEdge, lastEdge, d_histogram);
    checkCudaErrors(cudaGetLastError());  // surface launch-configuration errors
    // bug fix: the histogram is an int array; the original copied
    // numBins*sizeof(float) bytes, which was type-incorrect (it only worked
    // because sizeof(float) == sizeof(int) on this platform).
    checkCudaErrors(cudaMemcpy(outHist, d_histogram, numBins*sizeof(int), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_x));
    checkCudaErrors(cudaFree(d_histogram));
}
// One thread per bin: each thread scans the ENTIRE input array and counts
// the elements that fall into its bin, then writes the count directly (no
// atomics needed). Total work is O(numBins * arrayLength) with fully
// redundant global reads across threads — simple but slow for many bins.
__global__
void histogram_thread_bin_kernel(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist)
{
    // Approaches:
    // 1. One thread per element, writing into a single histogram. I can figure this out now.
    // 2. One thread per bin, reading the entire array. I can figure this out now.
    // 3. Something else
    // histogram_thread_bin_kernel: each thread handles one bin
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= numBins)
    {
        return;
    }
    float binSize = (lastEdge-firstEdge)/numBins;
    int myBinCount = 0;
    for (int ii = 0; ii < arrayLength; ii++)
    {
        int iBin = (x[ii]-firstEdge)/binSize;
        // clamp under/overflow into the edge bins
        if (iBin < 0)
        {
            iBin = 0;
        }
        if (iBin >= numBins)
        {
            iBin = numBins - 1;
        }
        if (iBin == idx)
        {
            myBinCount++;
        }
    }
    outHist[idx] = myBinCount;
}
// Host wrapper for the one-thread-per-bin histogram kernel: copies x to the
// device, zeroes the device histogram, launches the kernel and copies the
// numBins-bin result back into outHist.
//
// outHist must hold numBins ints; gridSize*blockSize should be >= numBins
// (excess threads exit early in the kernel).
void histogram_thread_bin(int arrayLength, const float* x, int numBins, float firstEdge, float lastEdge, int* outHist, int gridSize, int blockSize)
{
    float* d_x;
    int* d_histogram;
    checkCudaErrors(cudaMalloc(&d_x, arrayLength*sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_histogram, numBins*sizeof(int)));
    checkCudaErrors(cudaMemcpy(d_x, x, arrayLength*sizeof(float), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemset(d_histogram, 0, numBins*sizeof(int)));
    histogram_thread_bin_kernel<<<gridSize, blockSize>>>(arrayLength, d_x, numBins, firstEdge, lastEdge, d_histogram);
    checkCudaErrors(cudaGetLastError());  // surface launch-configuration errors
    // bug fix: copy numBins*sizeof(int) bytes — the histogram is an int
    // array; the original used sizeof(float), which was type-incorrect.
    checkCudaErrors(cudaMemcpy(outHist, d_histogram, numBins*sizeof(int), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_x));
    checkCudaErrors(cudaFree(d_histogram));
}
|
333a7cbb9829eae8c1a68cd3af549fea5b209b21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
/************* Functions **************/
// Grid-stride fill kernel: sets every element of x3[0..x5) to the scalar x4.
// (Auto-generated: x3 = output buffer, x4 = fill value, x5 = element count.)
__global__ void x2(float* x3, float x4, int x5) {
  // begin generating kernel function for FILL of type Float
  int x6 = gridDim.x * blockDim.x;  // total thread count = loop stride
  int x7 = threadIdx.x + blockIdx.x * blockDim.x;  // this thread's first index
  while (x7 < x5) {
    x3[x7] = x4;
    x7 = x7 + x6;
  }
  // end generating kernel function for FILL of type Float
}
/**************** Snippet ****************/
// Fills a 5-element device buffer with 3.0 via the FILL kernel, copies it
// back and prints two of the values. x0 is the CLI argument (unused here).
void Snippet(int x0) {
  // bug fix: the original malloc(0) result was immediately overwritten by
  // hipMalloc and leaked; start from a null pointer instead.
  float* x1 = nullptr;
  CUDA_CALL(hipMalloc(&x1, (size_t)(5 * sizeof(float))));
  hipLaunchKernelGGL(( x2), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x1, 3.0, 5);
  float* x8 = (float*)malloc(5 * sizeof(float));
  // blocking copy also synchronizes with the kernel launch above
  CUDA_CALL(hipMemcpy(x8, x1, (size_t)(5 * sizeof(float)), hipMemcpyDeviceToHost));
  printf("%f %f", x8[2], x8[3]);
  free(x8);  // bug fix: host buffer was leaked
  CUDA_CALL(hipFree(x1));
}
/*****************************************
End of C Generated Code
*******************************************/
/* Entry point: expects exactly one argument, forwarded to Snippet as int. */
int main(int argc, char *argv[]) {
  if (argc == 2) {
    Snippet(atoi(argv[1]));
  } else {
    printf("usage: %s <arg>\n", argv[0]);
  }
  return 0;
}
| 333a7cbb9829eae8c1a68cd3af549fea5b209b21.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
/************* Functions **************/
// Grid-stride fill kernel: sets every element of x3[0..x5) to the scalar x4.
// (Auto-generated: x3 = output buffer, x4 = fill value, x5 = element count.)
__global__ void x2(float* x3, float x4, int x5) {
  // begin generating kernel function for FILL of type Float
  int x6 = gridDim.x * blockDim.x;  // total thread count = loop stride
  int x7 = threadIdx.x + blockIdx.x * blockDim.x;  // this thread's first index
  while (x7 < x5) {
    x3[x7] = x4;
    x7 = x7 + x6;
  }
  // end generating kernel function for FILL of type Float
}
/**************** Snippet ****************/
// Fills a 5-element device buffer with 3.0 via the FILL kernel, copies it
// back and prints two of the values. x0 is the CLI argument (unused here).
void Snippet(int x0) {
  // bug fix: the original malloc(0) result was immediately overwritten by
  // cudaMalloc and leaked; start from a null pointer instead.
  float* x1 = nullptr;
  CUDA_CALL(cudaMalloc(&x1, (size_t)(5 * sizeof(float))));
  x2<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x1, 3.0, 5);
  float* x8 = (float*)malloc(5 * sizeof(float));
  // blocking copy also synchronizes with the kernel launch above
  CUDA_CALL(cudaMemcpy(x8, x1, (size_t)(5 * sizeof(float)), cudaMemcpyDeviceToHost));
  printf("%f %f", x8[2], x8[3]);
  free(x8);  // bug fix: host buffer was leaked
  CUDA_CALL(cudaFree(x1));
}
/*****************************************
End of C Generated Code
*******************************************/
/* Entry point: expects exactly one argument, forwarded to Snippet as int. */
int main(int argc, char *argv[]) {
  if (argc == 2) {
    Snippet(atoi(argv[1]));
  } else {
    printf("usage: %s <arg>\n", argv[0]);
  }
  return 0;
}
|
e0ecc1a50c9a260d9c1562edd6cbbd5713ebe47f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "../cuda/linalg.cu"
#include "../cuda/sort.cu"
#include "../cuda/fetchpair.cu"
#include "../cuda/outlier.cu"
#include "../cuda/gmm.cu"
#include "../cuda/pearson.cu"
#include "../cuda/spearman.cu"
#include "../cuda/similarity.cu"
typedef enum ClusteringMethod ClusteringMethod;
typedef enum CorrelationMethod CorrelationMethod;
// A coordinate in the strictly lower-triangular pair matrix: x is the row
// (the larger index of the pair), y is the column (the smaller one, y < x).
struct PairwiseIndex
{
    int x;
    int y;
};

// Maps a flat pair index onto lower-triangular coordinates, walking rows
// until the index is consumed: 0 -> (1,0), 1 -> (2,0), 2 -> (2,1), 3 -> (3,0), ...
PairwiseIndex pairwise_index(size_t index)
{
    size_t consumed {0};   // pairs covered by all completed rows
    size_t row {0};
    for ( ; consumed + row <= index; ++row )
    {
        consumed += row;
    }
    return {
        static_cast<int>(row),
        static_cast<int>(index - consumed)
    };
}

// Advances to the next pair in row-major lower-triangular order.
void operator++(PairwiseIndex& index)
{
    index.y += 1;
    if ( index.y >= index.x )
    {
        index.y = 0;
        index.x += 1;
    }
}
// Result for one gene pair: K is the number of clusters found (0 means the
// pair could not be processed), labels holds one cluster label per sample,
// and correlations holds one correlation value per cluster.
struct Pair
{
    char K;
    std::vector<char> labels;
    std::vector<float> correlations;
};
#define CUDA_SAFE_CALL(ans) check((ans), #ans, __FILE__, __LINE__)
// Aborts the process with file/line context when a HIP call fails; invoked
// through the CUDA_SAFE_CALL macro, which also stringifies the expression.
inline void check(hipError_t err, const char *func, const char *file, const int line)
{
    if ( err != hipSuccess ) {
        fprintf(stderr, "CUDA error at %s:%d\n", file, line);
        fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);
        exit(-1);
    }
}
// Allocates a unified-memory (managed) buffer of `size` elements of T,
// accessible from both host and device; aborts via CUDA_SAFE_CALL on
// failure. The caller owns the buffer; nothing here frees it.
template<typename T>
T * CUDABuffer(size_t size)
{
    T *ptr;
    CUDA_SAFE_CALL(hipMallocManaged((void **)&ptr, size * sizeof(T)));
    return ptr;
}
// Loads a whitespace-delimited dataframe: the first line lists sample
// (column) names, every following line is "<rowname> <v1> <v2> ...", with
// "NA" mapped to NaN. On return *p_rows/*p_cols hold the dimensions and
// *p_data points to a row-major unified-memory buffer (owned by the caller).
void load_dataframe(const char *filename, int *p_rows, int *p_cols, float **p_data)
{
    // create input stream
    std::ifstream in(filename);
    // read sample names from first line
    std::string line;
    std::getline(in, line);
    // determine number of samples
    int cols {0};
    std::stringstream ss(line);
    while ( !ss.eof() )
    {
        std::string colname;
        ss >> colname;
        cols++;
    }
    // read data from input file
    int rows {0};
    std::vector<float> values;
    // bug fix: loop on getline() success instead of !in.eof(), and skip
    // blank lines — the original processed a trailing empty line as an
    // extra row of garbage values.
    while ( std::getline(in, line) )
    {
        if ( line.empty() )
        {
            continue;
        }
        std::stringstream ss(line);
        // read row name
        std::string rowname;
        ss >> rowname;
        // read data elements
        for ( int i = 0; i < cols; i++ )
        {
            std::string token;
            ss >> token;
            // if token matches nan token then map it to NaN
            if ( token == "NA" )
            {
                values.push_back(NAN);
            }
            else
            {
                // bug fix: parse the token that was just read; the original
                // did a second `ss >> value`, which consumed the *next*
                // token and shifted every non-NA column by one.
                values.push_back(strtof(token.c_str(), nullptr));
            }
        }
        // increment number of rows
        rows++;
    }
    // initialize dataframe in unified memory and copy the parsed values in
    float *data = CUDABuffer<float>(rows * cols);
    memcpy(data, values.data(), rows * cols * sizeof(float));
    // save outputs
    *p_rows = rows;
    *p_cols = cols;
    *p_data = data;
}
// Returns the smallest power of two that is >= n, never less than 2
// (so nextPowerTwo(0), (1) and (2) all return 2).
int nextPowerTwo(int n)
{
    int result = 2;
    while ( result < n )
    {
        result <<= 1;
    }
    return result;
}
// Copies `size` elements starting at `data` into a freshly built std::vector.
template<class T>
std::vector<T> makeVector(const T* data, int size)
{
    return std::vector<T>(data, data + size);
}
// kinc-mini driver: loads an expression dataframe, then runs the pairwise
// similarity kernel over a fixed block of gene pairs in batches of
// globalWorkSize, collecting per-pair cluster labels and correlations.
// NOTE(review): results are accumulated in `pairs` but never written out,
// and the unified-memory buffers are deliberately left for the OS to
// reclaim at exit.
int main(int argc, char **argv)
{
    // parse command-line arguments
    if ( argc != 2 )
    {
        fprintf(stderr, "usage: ./kinc-mini <infile>\n");
        exit(-1);
    }
    const char *filename = argv[1];
    // load dataframe
    int geneSize;
    int sampleSize;
    float *expressions;
    load_dataframe(filename, &geneSize, &sampleSize, &expressions);
    printf("loaded dataframe (%d x %d)\n", geneSize, sampleSize);
    // copy dataframe to GPU
    // NOTE(review): hipMemPrefetchAsync expects a byte count; this passes
    // an element count (missing * sizeof(float)), so it prefetches less
    // than intended — harmless since prefetch is only a hint.
    CUDA_SAFE_CALL(hipMemPrefetchAsync(expressions, geneSize * sampleSize, 0, 0));
    // initialize execution parameters
    int globalWorkSize {4096};
    int localWorkSize {32};
    int minSamples {30};
    float minExpression {-INFINITY};
    float maxExpression {+INFINITY};
    ClusteringMethod clusMethod {ClusteringMethod_GMM};
    CorrelationMethod corrMethod {CorrelationMethod_Spearman};
    int minClusters {1};
    int maxClusters {5};
    Criterion criterion {ICL};
    bool removePreOutliers {true};
    bool removePostOutliers {true};
    // initialize buffers (all unified memory, one slot per in-flight pair)
    int W {globalWorkSize};
    int N {sampleSize};
    int N_pow2 {nextPowerTwo(N)};
    int K {maxClusters};
    int2 * in_index = CUDABuffer<int2> (1 * W);
    float * work_x = CUDABuffer<float> (N_pow2 * W);
    float * work_y = CUDABuffer<float> (N_pow2 * W);
    float2 * work_gmm_data = CUDABuffer<float2> (N * W);
    char * work_gmm_labels = CUDABuffer<char> (N * W);
    float * work_gmm_pi = CUDABuffer<float> (K * W);
    float2 * work_gmm_mu = CUDABuffer<float2> (K * W);
    float4 * work_gmm_sigma = CUDABuffer<float4> (K * W);
    float4 * work_gmm_sigmaInv = CUDABuffer<float4> (K * W);
    float * work_gmm_normalizer = CUDABuffer<float> (K * W);
    float2 * work_gmm_MP = CUDABuffer<float2> (K * W);
    int * work_gmm_counts = CUDABuffer<int> (K * W);
    float * work_gmm_logpi = CUDABuffer<float> (K * W);
    float * work_gmm_gamma = CUDABuffer<float> (N * K * W);
    char * out_K = CUDABuffer<char> (1 * W);
    char * out_labels = CUDABuffer<char> (N * W);
    float * out_correlations = CUDABuffer<float> (K * W);
    // initialize output
    std::vector<Pair> pairs;
    // iterate through all pairs in [workBlockStart, workBlockStart+workBlockSize)
    int workBlockStart {0};
    int workBlockSize {32768};
    PairwiseIndex index = pairwise_index(workBlockStart);
    for ( int i = 0; i < workBlockSize; i += globalWorkSize )
    {
        printf("%8d %4d %4d\n", workBlockStart, index.x, index.y);
        // write input buffers to device
        int numPairs {min(globalWorkSize, workBlockSize - i)};
        for ( int j = 0; j < numPairs; ++j )
        {
            in_index[j] = { index.x, index.y };
            ++index;
        }
        CUDA_SAFE_CALL(hipMemPrefetchAsync(in_index, W * sizeof(int2), 0, 0));
        // execute similiarity kernel
        hipLaunchKernelGGL(( Similarity_compute), dim3(globalWorkSize), dim3(localWorkSize), 0, 0,
            clusMethod,
            corrMethod,
            removePreOutliers,
            removePostOutliers,
            numPairs,
            expressions,
            sampleSize,
            in_index,
            minExpression,
            maxExpression,
            minSamples,
            minClusters,
            maxClusters,
            criterion,
            work_x,
            work_y,
            work_gmm_data,
            work_gmm_labels,
            work_gmm_pi,
            work_gmm_mu,
            work_gmm_sigma,
            work_gmm_sigmaInv,
            work_gmm_normalizer,
            work_gmm_MP,
            work_gmm_counts,
            work_gmm_logpi,
            work_gmm_gamma,
            out_K,
            out_labels,
            out_correlations
        );
        CUDA_SAFE_CALL(hipGetLastError());
        // read results from device (prefetch back to host memory)
        CUDA_SAFE_CALL(hipMemPrefetchAsync(out_K, W * sizeof(char), hipCpuDeviceId, 0));
        CUDA_SAFE_CALL(hipMemPrefetchAsync(out_labels, W * N * sizeof(char), hipCpuDeviceId, 0));
        CUDA_SAFE_CALL(hipMemPrefetchAsync(out_correlations, W * K * sizeof(float), hipCpuDeviceId, 0));
        // wait for everything to finish
        CUDA_SAFE_CALL(hipStreamSynchronize(0));
        // save results
        for ( int j = 0; j < numPairs; ++j )
        {
            // get pointers to the cluster labels and correlations for this pair
            const char *labels = &out_labels[j * sampleSize];
            const float *correlations = &out_correlations[j * maxClusters];
            Pair pair;
            // save the number of clusters
            pair.K = out_K[j];
            // save the cluster labels and correlations (if the pair was able to be processed)
            if ( pair.K > 0 )
            {
                pair.labels = makeVector(labels, sampleSize);
                pair.correlations = makeVector(correlations, maxClusters);
            }
            pairs.push_back(pair);
        }
    }
    return 0;
}
| e0ecc1a50c9a260d9c1562edd6cbbd5713ebe47f.cu | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "../cuda/linalg.cu"
#include "../cuda/sort.cu"
#include "../cuda/fetchpair.cu"
#include "../cuda/outlier.cu"
#include "../cuda/gmm.cu"
#include "../cuda/pearson.cu"
#include "../cuda/spearman.cu"
#include "../cuda/similarity.cu"
typedef enum ClusteringMethod ClusteringMethod;
typedef enum CorrelationMethod CorrelationMethod;
// A coordinate in the strictly lower-triangular pair matrix: x is the row
// (the larger index of the pair), y is the column (the smaller one, y < x).
struct PairwiseIndex
{
    int x;
    int y;
};

// Maps a flat pair index onto lower-triangular coordinates, walking rows
// until the index is consumed: 0 -> (1,0), 1 -> (2,0), 2 -> (2,1), 3 -> (3,0), ...
PairwiseIndex pairwise_index(size_t index)
{
    size_t consumed {0};   // pairs covered by all completed rows
    size_t row {0};
    for ( ; consumed + row <= index; ++row )
    {
        consumed += row;
    }
    return {
        static_cast<int>(row),
        static_cast<int>(index - consumed)
    };
}

// Advances to the next pair in row-major lower-triangular order.
void operator++(PairwiseIndex& index)
{
    index.y += 1;
    if ( index.y >= index.x )
    {
        index.y = 0;
        index.x += 1;
    }
}
// Result of processing one gene pair with the similarity kernel.
struct Pair
{
    char K;                          // number of clusters found (0 when the pair could not be processed)
    std::vector<char> labels;        // per-sample cluster label
    std::vector<float> correlations; // per-cluster correlation value
};
#define CUDA_SAFE_CALL(ans) check((ans), #ans, __FILE__, __LINE__)
// Abort the process with a diagnostic when a CUDA API call returned an error.
// Invoked through CUDA_SAFE_CALL, which supplies the stringified call plus
// __FILE__/__LINE__ of the call site.
inline void check(cudaError_t err, const char *func, const char *file, const int line)
{
    if ( err != cudaSuccess ) {
        fprintf(stderr, "CUDA error at %s:%d\n", file, line);
        fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);
        exit(-1);
    }
}
// Allocate a managed (unified-memory) buffer of `size` elements of type T,
// accessible from both host and device. Aborts on failure via CUDA_SAFE_CALL.
// The caller owns the buffer (freed with cudaFree; never freed in this mini app).
template<typename T>
T * CUDABuffer(size_t size)
{
    T *ptr;
    CUDA_SAFE_CALL(cudaMallocManaged((void **)&ptr, size * sizeof(T)));
    return ptr;
}
/**
 * Load a whitespace-delimited expression matrix from a text file.
 *
 * The first line lists the sample (column) names; each subsequent line is
 * "<rowname> <v0> <v1> ..." where the token "NA" denotes a missing value
 * (stored as NAN).
 *
 * @param filename path of the input file
 * @param p_rows   out: number of data rows read
 * @param p_cols   out: number of data columns (samples)
 * @param p_data   out: row-major rows*cols matrix in CUDA managed memory
 */
void load_dataframe(const char *filename, int *p_rows, int *p_cols, float **p_data)
{
    // create input stream
    std::ifstream in(filename);
    // read sample names from first line
    std::string line;
    std::getline(in, line);
    // determine number of samples by counting header tokens
    // (BUGFIX: the old `while (!ss.eof())` loop counted one extra column
    // when the header line carried trailing whitespace)
    int cols {0};
    {
        std::stringstream ss(line);
        std::string colname;
        while ( ss >> colname )
        {
            cols++;
        }
    }
    // read data from input file
    // (BUGFIX: the old `while (!in.eof())` loop processed the trailing
    // empty line as a bogus extra row of uninitialized values)
    int rows {0};
    std::vector<float> values;
    while ( std::getline(in, line) )
    {
        std::stringstream ss(line);
        // read row name; skip blank lines that yield no tokens
        std::string rowname;
        if ( !(ss >> rowname) )
        {
            continue;
        }
        // read data elements
        for ( int i = 0; i < cols; i++ )
        {
            std::string token;
            ss >> token;
            // BUGFIX: the value must be parsed from the token already read;
            // the old code re-extracted (`ss >> value`) and thereby consumed
            // the *next* token, shifting every non-NA value by one column.
            if ( token == "NA" || token.empty() )
            {
                values.push_back(NAN);
            }
            else
            {
                values.push_back(strtof(token.c_str(), nullptr));
            }
        }
        // increment number of rows
        rows++;
    }
    // copy parsed values into a managed buffer so the GPU can read them
    float *data = CUDABuffer<float>(rows * cols);
    memcpy(data, values.data(), rows * cols * sizeof(float));
    // save outputs
    *p_rows = rows;
    *p_cols = cols;
    *p_data = data;
}
// Return the smallest power of two that is >= n (never smaller than 2).
int nextPowerTwo(int n)
{
    int result = 2;
    while ( result < n )
    {
        result <<= 1;
    }
    return result;
}
// Copy `size` elements from a raw buffer into a freshly allocated std::vector.
template<class T>
std::vector<T> makeVector(const T* data, int size)
{
    // Range construction copies [data, data + size) element-wise.
    return std::vector<T>(data, data + size);
}
/**
 * Program entry point: load an expression matrix, then run the pairwise
 * Similarity_compute kernel over a work block of gene pairs, collecting the
 * cluster count, per-sample labels and per-cluster correlations of each pair.
 */
int main(int argc, char **argv)
{
    // parse command-line arguments
    if ( argc != 2 )
    {
        fprintf(stderr, "usage: ./kinc-mini <infile>\n");
        exit(-1);
    }
    const char *filename = argv[1];
    // load dataframe
    int geneSize;
    int sampleSize;
    float *expressions;
    load_dataframe(filename, &geneSize, &sampleSize, &expressions);
    printf("loaded dataframe (%d x %d)\n", geneSize, sampleSize);
    // copy dataframe to GPU
    // BUGFIX: cudaMemPrefetchAsync takes a byte count; the element count must
    // be scaled by sizeof(float) (previously only a quarter of the matrix was
    // prefetched). Cast avoids int overflow for large matrices.
    CUDA_SAFE_CALL(cudaMemPrefetchAsync(expressions, (size_t)geneSize * sampleSize * sizeof(float), 0, 0));
    // initialize execution parameters
    int globalWorkSize {4096};
    int localWorkSize {32};
    int minSamples {30};
    float minExpression {-INFINITY};
    float maxExpression {+INFINITY};
    ClusteringMethod clusMethod {ClusteringMethod_GMM};
    CorrelationMethod corrMethod {CorrelationMethod_Spearman};
    int minClusters {1};
    int maxClusters {5};
    Criterion criterion {ICL};
    bool removePreOutliers {true};
    bool removePostOutliers {true};
    // initialize buffers (W pairs in flight; each pair needs N samples, up to K clusters)
    int W {globalWorkSize};
    int N {sampleSize};
    int N_pow2 {nextPowerTwo(N)};
    int K {maxClusters};
    int2 * in_index = CUDABuffer<int2> (1 * W);
    float * work_x = CUDABuffer<float> (N_pow2 * W);
    float * work_y = CUDABuffer<float> (N_pow2 * W);
    float2 * work_gmm_data = CUDABuffer<float2> (N * W);
    char * work_gmm_labels = CUDABuffer<char> (N * W);
    float * work_gmm_pi = CUDABuffer<float> (K * W);
    float2 * work_gmm_mu = CUDABuffer<float2> (K * W);
    float4 * work_gmm_sigma = CUDABuffer<float4> (K * W);
    float4 * work_gmm_sigmaInv = CUDABuffer<float4> (K * W);
    float * work_gmm_normalizer = CUDABuffer<float> (K * W);
    float2 * work_gmm_MP = CUDABuffer<float2> (K * W);
    int * work_gmm_counts = CUDABuffer<int> (K * W);
    float * work_gmm_logpi = CUDABuffer<float> (K * W);
    float * work_gmm_gamma = CUDABuffer<float> (N * K * W);
    char * out_K = CUDABuffer<char> (1 * W);
    char * out_labels = CUDABuffer<char> (N * W);
    float * out_correlations = CUDABuffer<float> (K * W);
    // initialize output
    std::vector<Pair> pairs;
    // iterate through all pairs in chunks of globalWorkSize
    int workBlockStart {0};
    int workBlockSize {32768};
    PairwiseIndex index = pairwise_index(workBlockStart);
    for ( int i = 0; i < workBlockSize; i += globalWorkSize )
    {
        printf("%8d %4d %4d\n", workBlockStart, index.x, index.y);
        // write input buffers to device
        int numPairs {min(globalWorkSize, workBlockSize - i)};
        for ( int j = 0; j < numPairs; ++j )
        {
            in_index[j] = { index.x, index.y };
            ++index;
        }
        CUDA_SAFE_CALL(cudaMemPrefetchAsync(in_index, W * sizeof(int2), 0, 0));
        // execute similiarity kernel
        Similarity_compute<<<globalWorkSize, localWorkSize>>>(
            clusMethod,
            corrMethod,
            removePreOutliers,
            removePostOutliers,
            numPairs,
            expressions,
            sampleSize,
            in_index,
            minExpression,
            maxExpression,
            minSamples,
            minClusters,
            maxClusters,
            criterion,
            work_x,
            work_y,
            work_gmm_data,
            work_gmm_labels,
            work_gmm_pi,
            work_gmm_mu,
            work_gmm_sigma,
            work_gmm_sigmaInv,
            work_gmm_normalizer,
            work_gmm_MP,
            work_gmm_counts,
            work_gmm_logpi,
            work_gmm_gamma,
            out_K,
            out_labels,
            out_correlations
        );
        // catch launch-configuration errors immediately
        CUDA_SAFE_CALL(cudaGetLastError());
        // read results from device
        CUDA_SAFE_CALL(cudaMemPrefetchAsync(out_K, W * sizeof(char), cudaCpuDeviceId, 0));
        CUDA_SAFE_CALL(cudaMemPrefetchAsync(out_labels, W * N * sizeof(char), cudaCpuDeviceId, 0));
        CUDA_SAFE_CALL(cudaMemPrefetchAsync(out_correlations, W * K * sizeof(float), cudaCpuDeviceId, 0));
        // wait for everything to finish
        CUDA_SAFE_CALL(cudaStreamSynchronize(0));
        // save results
        for ( int j = 0; j < numPairs; ++j )
        {
            // get pointers to the cluster labels and correlations for this pair
            const char *labels = &out_labels[j * sampleSize];
            const float *correlations = &out_correlations[j * maxClusters];
            Pair pair;
            // save the number of clusters
            pair.K = out_K[j];
            // save the cluster labels and correlations (if the pair was able to be processed)
            if ( pair.K > 0 )
            {
                pair.labels = makeVector(labels, sampleSize);
                pair.correlations = makeVector(correlations, maxClusters);
            }
            pairs.push_back(pair);
        }
    }
    return 0;
}
|
e5f804f8f6adab6da466523b0e8e6e79d142bde6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "lzss_gpu_help.h"
#include "common.h"
int t;
// Entry point of the lzss GPU driver (HIP build): parse -c/-d/-o/-t options,
// then compress or decompress the input file into the output file, optionally
// printing size/ratio/timing statistics. Helper functions (read_string,
// find_option, file_size, read_timer, lzss_compress, lzss_decomp) come from
// the project headers included above.
int main(int argc, char **argv)
{
	hipDeviceSynchronize();
	// option parsing: output name plus exactly one of -c (compress) / -d (decompress)
	char *savename = read_string( argc, argv, "-o", NULL );
	char *compname = read_string( argc, argv, "-c", NULL );
	char *dcmpname = read_string( argc, argv, "-d", NULL );
	if( find_option( argc, argv, "-h" ) >= 0 ||
	savename == NULL ||
	(compname == NULL && dcmpname == NULL) ||
	(compname != NULL && dcmpname != NULL) )
	{
		printf( "Options:\n" );
		printf( "-h to see this help\n" );
		printf( "-c <filename> compression input file\n" );
		printf( "-d <filename> decompression input file\n" );
		printf( "-o <filename> output file\n" );
		printf( "-t print timings\n" );
		return 0;
	}
	double time;
	// `t` is the file-level timing flag read by other translation units
	t = (find_option( argc, argv, "-t" ) >= 0);
	FILE *input = compname ? fopen(compname,"r") : fopen(dcmpname,"r");
	if(!input)
	{
		// silently exit when the input cannot be opened
		return 0;
	}
	// NOTE(review): fsave is not checked for NULL before use — fopen("w")
	// failure would crash in fwrite below.
	FILE *fsave = fopen(savename,"w");
	if(t)
	{
		time = read_timer();
	}
	uint64_t fsize;
	/* Preprocessing */
	if(compname)
	{
		// compression path: read the whole input, compress, write header + payload
		fsize = file_size(compname);
		decomp_t *decomp = (decomp_t*) malloc(fsize*sizeof(uint8_t) + sizeof(decomp_t));
		size_t read = fread(decomp->content, fsize, sizeof(uint8_t),input);
		decomp->content_len = fsize;
		compressed_t *comp = lzss_compress(decomp);
		uint64_t total_len = comp->content_len + BITS_TO_CHARS(comp->flag_bits);
		size_t wrote = fwrite(comp, sizeof(uint8_t), sizeof(compressed_t) + total_len * sizeof(uint8_t), fsave);
		if(t)
		{
			// fprintf(stdout,"Compression ratio: %lf\n", (1.0*fsize)/(1.0*wrote + 1.0e-7));
			// fprintf(stdout,"Deflation: %.0lf%%\n", (1.0*wrote)/(1.0*fsize + 1.0e-7)*100);
			fprintf(stdout,"%ld, ",(long int) fsize);
			fprintf(stdout,"%lf, ", (1.0*wrote)/(1.0*fsize + 1.0e-7));
		}
	}
	else
	{
		// decompression path: read the compressed blob, decode, write raw content
		fsize = file_size(dcmpname);
		compressed_t *comp = (compressed_t*) malloc(fsize*sizeof(uint8_t));
		size_t read = fread(comp, fsize, sizeof(uint8_t),input);
		decomp_t *decomp = lzss_decomp(comp);
		uint64_t total_len = decomp->content_len;
		size_t wrote = fwrite(decomp->content, sizeof(uint8_t), total_len * sizeof(uint8_t), fsave);
	}
	if(t)
	{
		time = read_timer() - time;
		// fprintf(stdout,"Input Size: %ld\n",(long int) fsize);
		fprintf(stdout," %lf, ",time);
		if(!compname) fprintf(stdout,"\n");
	}
	return 0;
}
| e5f804f8f6adab6da466523b0e8e6e79d142bde6.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <cuda.h>
#include "lzss_gpu_help.h"
#include "common.h"
int t;
/**
 * Entry point of the lzss GPU driver (CUDA build): parse -c/-d/-o/-t options,
 * then compress or decompress the input file into the output file, optionally
 * printing size/ratio/timing statistics. Helper functions (read_string,
 * find_option, file_size, read_timer, lzss_compress, lzss_decomp) come from
 * the project headers included above.
 */
int main(int argc, char **argv)
{
	cudaDeviceSynchronize();
	// option parsing: output name plus exactly one of -c (compress) / -d (decompress)
	char *savename = read_string( argc, argv, "-o", NULL );
	char *compname = read_string( argc, argv, "-c", NULL );
	char *dcmpname = read_string( argc, argv, "-d", NULL );
	if( find_option( argc, argv, "-h" ) >= 0 ||
	savename == NULL ||
	(compname == NULL && dcmpname == NULL) ||
	(compname != NULL && dcmpname != NULL) )
	{
		printf( "Options:\n" );
		printf( "-h to see this help\n" );
		printf( "-c <filename> compression input file\n" );
		printf( "-d <filename> decompression input file\n" );
		printf( "-o <filename> output file\n" );
		printf( "-t print timings\n" );
		return 0;
	}
	double time;
	// `t` is the file-level timing flag read by other translation units
	t = (find_option( argc, argv, "-t" ) >= 0);
	FILE *input = compname ? fopen(compname,"r") : fopen(dcmpname,"r");
	if(!input)
	{
		return 0;
	}
	FILE *fsave = fopen(savename,"w");
	// BUGFIX: a failed output fopen previously went unchecked and the NULL
	// FILE* was passed straight to fwrite; bail out (closing the input) instead.
	if(!fsave)
	{
		fclose(input);
		return 0;
	}
	if(t)
	{
		time = read_timer();
	}
	uint64_t fsize;
	/* Preprocessing */
	if(compname)
	{
		// compression path: read the whole input, compress, write header + payload
		fsize = file_size(compname);
		decomp_t *decomp = (decomp_t*) malloc(fsize*sizeof(uint8_t) + sizeof(decomp_t));
		size_t read = fread(decomp->content, fsize, sizeof(uint8_t),input);
		decomp->content_len = fsize;
		compressed_t *comp = lzss_compress(decomp);
		uint64_t total_len = comp->content_len + BITS_TO_CHARS(comp->flag_bits);
		size_t wrote = fwrite(comp, sizeof(uint8_t), sizeof(compressed_t) + total_len * sizeof(uint8_t), fsave);
		if(t)
		{
			// fprintf(stdout,"Compression ratio: %lf\n", (1.0*fsize)/(1.0*wrote + 1.0e-7));
			// fprintf(stdout,"Deflation: %.0lf%%\n", (1.0*wrote)/(1.0*fsize + 1.0e-7)*100);
			fprintf(stdout,"%ld, ",(long int) fsize);
			fprintf(stdout,"%lf, ", (1.0*wrote)/(1.0*fsize + 1.0e-7));
		}
	}
	else
	{
		// decompression path: read the compressed blob, decode, write raw content
		fsize = file_size(dcmpname);
		compressed_t *comp = (compressed_t*) malloc(fsize*sizeof(uint8_t));
		size_t read = fread(comp, fsize, sizeof(uint8_t),input);
		decomp_t *decomp = lzss_decomp(comp);
		uint64_t total_len = decomp->content_len;
		size_t wrote = fwrite(decomp->content, sizeof(uint8_t), total_len * sizeof(uint8_t), fsave);
	}
	if(t)
	{
		time = read_timer() - time;
		// fprintf(stdout,"Input Size: %ld\n",(long int) fsize);
		fprintf(stdout," %lf, ",time);
		if(!compname) fprintf(stdout,"\n");
	}
	// BUGFIX: close both streams so the output is flushed deterministically
	// and the descriptors are released.
	fclose(input);
	fclose(fsave);
	return 0;
}
|
5cae7331fd55eaa2dca95d2176b55d6ada3f12f2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* FLAME GPU v 1.5.X for CUDA 9
* Copyright University of Sheffield.
* Original Author: Dr Paul Richmond (user contributions tracked on https://github.com/FLAMEGPU/FLAMEGPU)
* Contact: p.richmond@sheffield.ac.uk (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <errno.h>
#ifdef VISUALISATION
#include <GL/glew.h>
#include <GL/glut.h>
#endif
#include "header.h"
#if defined(PROFILE)
unsigned int g_profile_colour_id = 0;
#endif
/* IO Variables*/
char inputfile[100]; /**< Input path char buffer*/
char outputpath[1000]; /**< Output path char buffer*/
// Define the default value indicating if XML output should be produced or not.
#define OUTPUT_TO_XML 0
#define HELP_OPTION_SHORT "-h"
#define HELP_OPTION_LONG "--help"
/** checkUsage
* Function to check the correct number of arguments
* @param arc main argument count
* @param argv main argument values
* @return true if usage is correct, otherwise false
*/
// Validate the command line and print a usage banner when it is wrong or when
// -h/--help is requested. Returns true when the argument count is acceptable
// for the active build mode (visualisation vs console).
int checkUsage(int argc, char** argv) {
	// Initalise return value.
	int retval = true;
	// Get the EXE name.
	// Strip the directory portion of argv[0] for the usage message.
	char * executable = nullptr;
	size_t i = 0;
	size_t last = 0;
	while (argv[0][i] != '\0')
	{
		/* For windows directories */
		if (argv[0][i] == '\\') last = i + 1;
		/* For unix directories */
		if (argv[0][i] == '/') last = i + 1;
		i++;
	}
	size_t substrLen = strlen(argv[0]) - last;
	executable = (char*)malloc(substrLen + 1);
	if (executable != nullptr) {
		executable[substrLen] = '\0';
		strncpy(executable, argv[0] + last, substrLen);
	}
	// Iterate each argument, looking for the help flag.
	bool helpFlagFound = false;
	for(int index = 1; index < argc; index++){
		if(strcmp(HELP_OPTION_SHORT, argv[index]) == 0 || strcmp(HELP_OPTION_LONG, argv[index]) == 0){
			helpFlagFound = true;
			break;
		}
	}
	//Check usage
#ifdef VISUALISATION
	printf("FLAMEGPU Visualisation mode\n");
	if(helpFlagFound || argc < 2 || argc > 3)
	{
		printf("\nusage: %s [-h] [--help] input_path [cuda_device_id]\n", executable != nullptr ? executable : "main");
		printf("\n");
		printf("required arguments:\n");
		printf("  input_path           Path to initial states XML file OR path to output XML directory\n");
		printf("\n");
		printf("options arguments:\n");
		printf("  -h, --help           Output this help message.\n");
		printf("  cuda_device_id       CUDA device ID to be used. Default is 0\n");
		// Set the appropriate return value
		retval = false;
	}
#else
	printf("FLAMEGPU Console mode\n");
	if(helpFlagFound || argc < 3 || argc > 5)
	{
		printf("\nusage: %s [-h] [--help] input_path num_iterations [cuda_device_id] [XML_output_override]\n", executable != nullptr ? executable : "main");
		printf("\n");
		printf("required arguments:\n");
		printf("  input_path           Path to initial states XML file OR path to output XML directory\n");
		printf("  num_iterations       Number of simulation iterations\n");
		printf("\n");
		printf("options arguments:\n");
		printf("  -h, --help           Output this help message.\n");
		printf("  cuda_device_id       CUDA device ID to be used. Default is 0.\n");
		printf("  XML_output_frequency Frequency of XML output\n");
		printf("                       0 = No output\n");
		printf("                       1 = Every 1 iteration\n");
		printf("                       5 = Every 5 iterations\n");
		printf("                       Default value: %d\n", OUTPUT_TO_XML);
		// Set the appropriate return value
		retval = false;
	}
#endif
	// Free malloced memory
	free(executable);
	executable = nullptr;
	// return the appropriate code.
	return retval;
}
/** getOutputDir
* Function which gets the global char array contianign the path for output
* @return char array containing relative path to output locaiton
*/
/** getOutputDir
 * @return the global char buffer holding the relative output directory path
 */
const char* getOutputDir()
{
    return outputpath;
}
/** parentDirectoryOfPath
* Function which given a path removes the last segment, copying into a pre-defined buffer.
* @param parent pre allocated buffer for the shoretened path
* @param path input path to be shortented
*/
/** parentDirectoryOfPath
 * Copy `path` into `parent`, truncated just after its last directory
 * separator ('/' or '\\'); with no separator the result is the empty string.
 * @param parent pre-allocated destination buffer (must be large enough)
 * @param path   input path to shorten
 */
void parentDirectoryOfPath(char * parent, char * path) {
    // Locate the final separator of either flavour (-1 when none found).
    int lastSep = -1;
    for (int i = 0; path[i] != '\0'; i++) {
        char c = path[i];
        if (c == '\\' || c == '/') {
            lastSep = i;
        }
    }
    // Duplicate the path, then cut it immediately after the separator,
    // keeping the trailing slash (files and directories cannot share a name).
    strcpy(parent, path);
    parent[lastSep + 1] = '\0';
}
/** getPathProperties
* Function to get information about a filepath, if it exists, is a file or is a directory
* @param path path to be checked
* @param isFile returned boolean indicating if the path points to a file.
* @param isDir return boolean indicating if the path points to a directory.
* @return boolean indicating if the path exists.
*/
/** getPathProperties
 * Query a filesystem path via stat().
 * @param path   path to inspect
 * @param isFile out: true when the path names a regular file
 * @param isDir  out: true when the path names a directory
 * @return true when the path exists (as a file or directory)
 * Aborts the process on stat() failures other than ENOENT.
 */
bool getPathProperties(char * path, bool * isFile, bool * isDir) {
    // Default both classifications to "no" until stat succeeds.
    *isFile = false;
    *isDir = false;
    struct stat statBuf {0};
    // stat() returns 0 on success and sets errno on failure.
    if (stat(path, &statBuf) == 0) {
        *isDir = (statBuf.st_mode & S_IFDIR) != 0;
        *isFile = (statBuf.st_mode & S_IFREG) != 0;
        return *isDir || *isFile;
    }
    // Path simply not present: report "does not exist".
    if (errno == ENOENT) {
        return false;
    }
    // Any other stat failure is unexpected; abort as the original logic did.
    fprintf(stderr, "Error: An unknown error occured while processing file infomration.\n");
    fflush(stdout);
    exit(EXIT_FAILURE);
}
/** setFilePaths
* Function to set global variables for the input XML file and its directory location
*@param input input path of model xml file
*/
// Populate the global `inputfile` / `outputpath` buffers from the single
// path argument: an existing file becomes the initial-states file with its
// parent as output dir; an existing directory becomes the output dir with no
// input file; a missing path falls back to its parent dir or the cwd.
void setFilePaths(char* input){
	PROFILE_SCOPED_RANGE("setFilePaths");
	// Get infomration about the inputpath file.
	bool inputIsFile = false;
	bool inputIsDir = false;
	bool inputExists = getPathProperties(input, &inputIsFile, &inputIsDir);
	// If input exists:
	if (inputExists) {
		// If it is a file
		if (inputIsFile) {
			//Copy input file, and proceed as normal.
			strcpy(inputfile, input);
			// We must get the parent directory as the output directory.
			parentDirectoryOfPath(outputpath, inputfile);
		}
		// Otherwise it is a directory
		else {
			// We do not have an input file., but use this as the directory.
			inputfile[0] = '\0';
			strcpy(outputpath, input);
		}
	}
	// Otherwise if the input file does not exist
	else {
		// The input path is empty.
		inputfile[0] = '\0';
		// Try to find a parent directory.
		parentDirectoryOfPath(outputpath, input);
		// Check if the parent directory exists.
		bool dirIsFile = false;
		bool dirIsDir = false;
		bool dirExists = getPathProperties(outputpath, &dirIsFile, &dirIsDir);
		// If the dir exists
		if (dirExists) {
			// IF the dir is not a directory, it is a file. Abort.
			if (!dirIsDir || dirIsFile) {
				printf("Error: outputpath `%s` exists, but it is not a directory.\n", outputpath);
				exit(EXIT_FAILURE);
			}
			else {
				// Otherwise the parent directory exists and is a directory.
				printf("Warning: `%s` does not exist using parent directory for output.\n", input);
			}
		}
		else {
			// If the directory does not exist, use the working directory.
			printf("Warning: Parent directory `%s` does not exist. Using current working directory for output.\n", outputpath);
			outputpath[0] = '\0';
		}
	}
	printf("Initial states: %s\n", inputfile[0] != '\0' ? inputfile : "(none)");
	printf("Output dir: %s\n", outputpath[0] != '\0' ? outputpath : "(cwd)");
}
/** getOutputXMLFrequency
 * Determine how often (in iterations) XML state output should be written.
 * @return 0 when output is disabled (always in visualisation builds),
 *         otherwise the frequency taken from argv[4] or the compiled default.
 */
int getOutputXMLFrequency(int argc, char**argv){
#ifdef VISUALISATION
	// Visualisation builds never produce XML output.
	return 0;
#else
	int frequency = OUTPUT_TO_XML;
	// Console mode: the optional 4th argument overrides the default;
	// non-positive values disable output entirely.
	if (argc >= 5){
		frequency = atoi(argv[4]);
		if (frequency <= 0){
			frequency = 0;
		}
	}
	return frequency;
#endif
}
// Select and initialise the GPU device. The device id comes from argv[2]
// (visualisation build) or argv[3] (console build), defaulting to 0.
// Aborts when no device exists or the requested id is out of range.
void initCUDA(int argc, char** argv){
	PROFILE_SCOPED_RANGE("initCUDA");
	hipError_t cudaStatus;
	int device;
	int device_count;
	//default device
	device = 0;
	cudaStatus = hipGetDeviceCount(&device_count);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "Error finding CUDA devices!  Do you have a CUDA-capable GPU installed?\n");
		exit(EXIT_FAILURE);
	}
	if (device_count == 0){
		fprintf(stderr, "Error no CUDA devices found!\n");
		exit(EXIT_FAILURE);
	}
	// The device id argument index differs per build mode.
#ifdef VISUALISATION
	if (argc >= 3){
		device = atoi(argv[2]);
	}
#else
	if (argc >= 4){
		device = atoi(argv[3]);
	}
#endif
	if (device >= device_count){
		fprintf(stderr, "Error selecting CUDA device! Device id '%d' is not found?\n", device);
		exit(EXIT_FAILURE);
	}
	// Select device
	cudaStatus = hipSetDevice(device);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "Error setting CUDA device!\n");
		exit(EXIT_FAILURE);
	}
	// Get device properties.
	hipDeviceProp_t props;
	cudaStatus = hipGetDeviceProperties(&props, device);
	if(cudaStatus == hipSuccess){
#ifdef _MSC_VER
		const char * driverMode = props.tccDriver ? "TCC" : "WDDM";
#else
		const char * driverMode = "Linux";
#endif
		fprintf(stdout, "GPU %d: %s, SM%d%d, %s, pciBusId %d\n", device, props.name, props.major, props.minor, driverMode, props.pciBusID);
	} else {
		fprintf(stderr, "Error Accessing Cuda Device properties for GPU %d\n", device);
	}
	// NOTE(review): the no-op free presumably forces runtime/context
	// initialisation on the selected device — confirm against the runtime docs.
	hipFree(0);
}
/** runConsoleWithoutXMLOutput
 * Drive the simulation for a fixed number of iterations without writing
 * any XML state output.
 * @param iterations number of simulation steps to execute
 */
void runConsoleWithoutXMLOutput(int iterations){
	PROFILE_SCOPED_RANGE("runConsoleWithoutXMLOutput");
	int step = 0;
	while (step < iterations)
	{
		printf("Processing Simulation Step %i\n", step + 1);
		// advance the model by a single simulation iteration
		singleIteration();
		++step;
	}
}
// Drive the simulation for `iterations` steps, writing the full agent state
// to XML every `outputFrequency` iterations (caller guarantees it is > 0),
// plus a final write when the last iteration was not already written.
void runConsoleWithXMLOutput(int iterations, int outputFrequency){
	PROFILE_SCOPED_RANGE("runConsoleWithXMLOutput");
	// Iteratively tun the correct number of iterations.
	for (int i=0; i< iterations; i++)
	{
		printf("Processing Simulation Step %i\n", i+1);
		//single simulation iteration
		singleIteration();
		// Save the iteration data to disk
		if((i+1) % outputFrequency == 0){
			// Dump every agent list (host and device copies plus counts) for all agent types.
			saveIterationData(outputpath, i+1, get_host_Person_default_agents(), get_device_Person_default_agents(), get_agent_Person_default_count(),get_host_Person_s2_agents(), get_device_Person_s2_agents(), get_agent_Person_s2_count(),get_host_TBAssignment_tbdefault_agents(), get_device_TBAssignment_tbdefault_agents(), get_agent_TBAssignment_tbdefault_count(),get_host_Household_hhdefault_agents(), get_device_Household_hhdefault_agents(), get_agent_Household_hhdefault_count(),get_host_HouseholdMembership_hhmembershipdefault_agents(), get_device_HouseholdMembership_hhmembershipdefault_agents(), get_agent_HouseholdMembership_hhmembershipdefault_count(),get_host_Church_chudefault_agents(), get_device_Church_chudefault_agents(), get_agent_Church_chudefault_count(),get_host_ChurchMembership_chumembershipdefault_agents(), get_device_ChurchMembership_chumembershipdefault_agents(), get_agent_ChurchMembership_chumembershipdefault_count(),get_host_Transport_trdefault_agents(), get_device_Transport_trdefault_agents(), get_agent_Transport_trdefault_count(),get_host_TransportMembership_trmembershipdefault_agents(), get_device_TransportMembership_trmembershipdefault_agents(), get_agent_TransportMembership_trmembershipdefault_count(),get_host_Clinic_cldefault_agents(), get_device_Clinic_cldefault_agents(), get_agent_Clinic_cldefault_count(),get_host_Workplace_wpdefault_agents(), get_device_Workplace_wpdefault_agents(), get_agent_Workplace_wpdefault_count(),get_host_WorkplaceMembership_wpmembershipdefault_agents(), get_device_WorkplaceMembership_wpmembershipdefault_agents(), get_agent_WorkplaceMembership_wpmembershipdefault_count(),get_host_Bar_bdefault_agents(), get_device_Bar_bdefault_agents(), get_agent_Bar_bdefault_count(),get_host_School_schdefault_agents(), get_device_School_schdefault_agents(), get_agent_School_schdefault_count(),get_host_SchoolMembership_schmembershipdefault_agents(), get_device_SchoolMembership_schmembershipdefault_agents(), 
			get_agent_SchoolMembership_schmembershipdefault_count());
			printf("Iteration %i Saved to XML\n", i+1);
		}
	}
	// If we did not yet output the final iteration, output the final iteration.
	if(iterations % outputFrequency != 0){
		saveIterationData(outputpath, iterations, get_host_Person_default_agents(), get_device_Person_default_agents(), get_agent_Person_default_count(),get_host_Person_s2_agents(), get_device_Person_s2_agents(), get_agent_Person_s2_count(),get_host_TBAssignment_tbdefault_agents(), get_device_TBAssignment_tbdefault_agents(), get_agent_TBAssignment_tbdefault_count(),get_host_Household_hhdefault_agents(), get_device_Household_hhdefault_agents(), get_agent_Household_hhdefault_count(),get_host_HouseholdMembership_hhmembershipdefault_agents(), get_device_HouseholdMembership_hhmembershipdefault_agents(), get_agent_HouseholdMembership_hhmembershipdefault_count(),get_host_Church_chudefault_agents(), get_device_Church_chudefault_agents(), get_agent_Church_chudefault_count(),get_host_ChurchMembership_chumembershipdefault_agents(), get_device_ChurchMembership_chumembershipdefault_agents(), get_agent_ChurchMembership_chumembershipdefault_count(),get_host_Transport_trdefault_agents(), get_device_Transport_trdefault_agents(), get_agent_Transport_trdefault_count(),get_host_TransportMembership_trmembershipdefault_agents(), get_device_TransportMembership_trmembershipdefault_agents(), get_agent_TransportMembership_trmembershipdefault_count(),get_host_Clinic_cldefault_agents(), get_device_Clinic_cldefault_agents(), get_agent_Clinic_cldefault_count(),get_host_Workplace_wpdefault_agents(), get_device_Workplace_wpdefault_agents(), get_agent_Workplace_wpdefault_count(),get_host_WorkplaceMembership_wpmembershipdefault_agents(), get_device_WorkplaceMembership_wpmembershipdefault_agents(), get_agent_WorkplaceMembership_wpmembershipdefault_count(),get_host_Bar_bdefault_agents(), get_device_Bar_bdefault_agents(), get_agent_Bar_bdefault_count(),get_host_School_schdefault_agents(), get_device_School_schdefault_agents(), get_agent_School_schdefault_count(),get_host_SchoolMembership_schmembershipdefault_agents(), get_device_SchoolMembership_schmembershipdefault_agents(), 
		get_agent_SchoolMembership_schmembershipdefault_count());
		printf("Iteration %i Saved to XML\n", iterations);
	}
}
/**
* Program main (Handles arguments)
*/
// FLAME GPU entry point (HIP build): validate arguments, resolve I/O paths,
// initialise the device and simulation, then either launch the visualiser or
// run the timed console loop, and finally clean up and reset the device.
int main( int argc, char** argv)
{
	hipError_t cudaStatus;
	//check usage mode
	if (!checkUsage(argc, argv))
		exit(EXIT_FAILURE);
	//get the directory paths
	setFilePaths(argv[1]);
	//determine frequency we want to output to xml.
	int outputXMLFrequency = getOutputXMLFrequency(argc, argv);
	//initialise CUDA
	initCUDA(argc, argv);
#ifdef VISUALISATION
	//Init visualisation must be done before simulation init
	initVisualisation();
#endif
	//initialise the simulation
	initialise(inputfile);
#ifdef VISUALISATION
	runVisualisation();
	exit(EXIT_SUCCESS);
#else
	//Benchmark simulation
	hipEvent_t start, stop;
	float milliseconds = 0;
	//create timing events
	hipEventCreate(&start);
	hipEventCreate(&stop);
	//Get the number of iterations
	int iterations = atoi(argv[2]);
	if (iterations <= 0)
	{
		printf("Second argument must be a positive integer (Number of Iterations)\n");
		exit(EXIT_FAILURE);
	}
	//start timing
	hipEventRecord(start);
	// Launch the main loop with / without xml output.
	if(outputXMLFrequency > 0){
		runConsoleWithXMLOutput(iterations, outputXMLFrequency);
	} else {
		runConsoleWithoutXMLOutput(iterations);
	}
	//CUDA stop timing
	hipEventRecord(stop);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&milliseconds, start, stop);
	printf( "Total Processing time: %f (ms)\n", milliseconds);
#endif
	cleanup();
	// Reset the device so profilers flush and resources are released.
	PROFILE_PUSH_RANGE("hipDeviceReset");
	cudaStatus = hipDeviceReset();
	PROFILE_POP_RANGE();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "Error resetting the device!\n");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
| 5cae7331fd55eaa2dca95d2176b55d6ada3f12f2.cu |
/*
* FLAME GPU v 1.5.X for CUDA 9
* Copyright University of Sheffield.
* Original Author: Dr Paul Richmond (user contributions tracked on https://github.com/FLAMEGPU/FLAMEGPU)
* Contact: p.richmond@sheffield.ac.uk (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <errno.h>
#ifdef VISUALISATION
#include <GL/glew.h>
#include <GL/glut.h>
#endif
#include "header.h"
#if defined(PROFILE)
unsigned int g_profile_colour_id = 0;
#endif
/* IO Variables*/
char inputfile[100]; /**< Input path char buffer*/
char outputpath[1000]; /**< Output path char buffer*/
// Define the default value indicating if XML output should be produced or not.
#define OUTPUT_TO_XML 0
#define HELP_OPTION_SHORT "-h"
#define HELP_OPTION_LONG "--help"
/** checkUsage
* Function to check the correct number of arguments
* @param arc main argument count
* @param argv main argument values
* @return true if usage is correct, otherwise false
*/
// Validate the command line and print a usage banner when it is wrong or when
// -h/--help is requested. Returns true when the argument count is acceptable
// for the active build mode (visualisation vs console).
int checkUsage(int argc, char** argv) {
	// Initalise return value.
	int retval = true;
	// Get the EXE name.
	// Strip the directory portion of argv[0] for the usage message.
	char * executable = nullptr;
	size_t i = 0;
	size_t last = 0;
	while (argv[0][i] != '\0')
	{
		/* For windows directories */
		if (argv[0][i] == '\\') last = i + 1;
		/* For unix directories */
		if (argv[0][i] == '/') last = i + 1;
		i++;
	}
	size_t substrLen = strlen(argv[0]) - last;
	executable = (char*)malloc(substrLen + 1);
	if (executable != nullptr) {
		executable[substrLen] = '\0';
		strncpy(executable, argv[0] + last, substrLen);
	}
	// Iterate each argument, looking for the help flag.
	bool helpFlagFound = false;
	for(int index = 1; index < argc; index++){
		if(strcmp(HELP_OPTION_SHORT, argv[index]) == 0 || strcmp(HELP_OPTION_LONG, argv[index]) == 0){
			helpFlagFound = true;
			break;
		}
	}
	//Check usage
#ifdef VISUALISATION
	printf("FLAMEGPU Visualisation mode\n");
	if(helpFlagFound || argc < 2 || argc > 3)
	{
		printf("\nusage: %s [-h] [--help] input_path [cuda_device_id]\n", executable != nullptr ? executable : "main");
		printf("\n");
		printf("required arguments:\n");
		printf("  input_path           Path to initial states XML file OR path to output XML directory\n");
		printf("\n");
		printf("options arguments:\n");
		printf("  -h, --help           Output this help message.\n");
		printf("  cuda_device_id       CUDA device ID to be used. Default is 0\n");
		// Set the appropriate return value
		retval = false;
	}
#else
	printf("FLAMEGPU Console mode\n");
	if(helpFlagFound || argc < 3 || argc > 5)
	{
		printf("\nusage: %s [-h] [--help] input_path num_iterations [cuda_device_id] [XML_output_override]\n", executable != nullptr ? executable : "main");
		printf("\n");
		printf("required arguments:\n");
		printf("  input_path           Path to initial states XML file OR path to output XML directory\n");
		printf("  num_iterations       Number of simulation iterations\n");
		printf("\n");
		printf("options arguments:\n");
		printf("  -h, --help           Output this help message.\n");
		printf("  cuda_device_id       CUDA device ID to be used. Default is 0.\n");
		printf("  XML_output_frequency Frequency of XML output\n");
		printf("                       0 = No output\n");
		printf("                       1 = Every 1 iteration\n");
		printf("                       5 = Every 5 iterations\n");
		printf("                       Default value: %d\n", OUTPUT_TO_XML);
		// Set the appropriate return value
		retval = false;
	}
#endif
	// Free malloced memory
	free(executable);
	executable = nullptr;
	// return the appropriate code.
	return retval;
}
/** getOutputDir
* Function which gets the global char array contianign the path for output
* @return char array containing relative path to output locaiton
*/
/** getOutputDir
 * @return the global char buffer holding the relative output directory path
 */
const char* getOutputDir()
{
    return outputpath;
}
/** parentDirectoryOfPath
* Function which given a path removes the last segment, copying into a pre-defined buffer.
* @param parent pre allocated buffer for the shoretened path
* @param path input path to be shortented
*/
/** parentDirectoryOfPath
 * Copy `path` into `parent`, truncated just after its last directory
 * separator ('/' or '\\'); with no separator the result is the empty string.
 * @param parent pre-allocated destination buffer (must be large enough)
 * @param path   input path to shorten
 */
void parentDirectoryOfPath(char * parent, char * path) {
    // Locate the final separator of either flavour (-1 when none found).
    int lastSep = -1;
    for (int i = 0; path[i] != '\0'; i++) {
        char c = path[i];
        if (c == '\\' || c == '/') {
            lastSep = i;
        }
    }
    // Duplicate the path, then cut it immediately after the separator,
    // keeping the trailing slash (files and directories cannot share a name).
    strcpy(parent, path);
    parent[lastSep + 1] = '\0';
}
/** getPathProperties
* Function to get information about a filepath, if it exists, is a file or is a directory
* @param path path to be checked
* @param isFile returned boolean indicating if the path points to a file.
* @param isDir return boolean indicating if the path points to a directory.
* @return boolean indicating if the path exists.
*/
/** getPathProperties
 * Query a filesystem path via stat().
 * @param path   path to inspect
 * @param isFile out: true when the path names a regular file
 * @param isDir  out: true when the path names a directory
 * @return true when the path exists (as a file or directory)
 * Aborts the process on stat() failures other than ENOENT.
 */
bool getPathProperties(char * path, bool * isFile, bool * isDir) {
    // Default both classifications to "no" until stat succeeds.
    *isFile = false;
    *isDir = false;
    struct stat statBuf {0};
    // stat() returns 0 on success and sets errno on failure.
    if (stat(path, &statBuf) == 0) {
        *isDir = (statBuf.st_mode & S_IFDIR) != 0;
        *isFile = (statBuf.st_mode & S_IFREG) != 0;
        return *isDir || *isFile;
    }
    // Path simply not present: report "does not exist".
    if (errno == ENOENT) {
        return false;
    }
    // Any other stat failure is unexpected; abort as the original logic did.
    fprintf(stderr, "Error: An unknown error occured while processing file infomration.\n");
    fflush(stdout);
    exit(EXIT_FAILURE);
}
/** setFilePaths
 * Sets the global input-file and output-directory paths from the model path argument.
 * If the argument is a file, it becomes the initial-states file and output goes
 * beside it; if it is a directory, output goes into it; otherwise the parent
 * directory (or, failing that, the current working directory) is used for output.
 * @param input input path of model xml file (or output directory)
 */
void setFilePaths(char* input){
    PROFILE_SCOPED_RANGE("setFilePaths");
    // Query what the provided path refers to.
    bool pathIsFile = false;
    bool pathIsDir = false;
    const bool pathExists = getPathProperties(input, &pathIsFile, &pathIsDir);
    if (pathExists && pathIsFile) {
        // A model file was provided: use it, and output alongside it.
        strcpy(inputfile, input);
        parentDirectoryOfPath(outputpath, inputfile);
    } else if (pathExists) {
        // A directory was provided: no initial-states file; output into the directory.
        inputfile[0] = '\0';
        strcpy(outputpath, input);
    } else {
        // The requested path does not exist: no initial-states file.
        inputfile[0] = '\0';
        // Fall back to the parent directory of the requested path.
        parentDirectoryOfPath(outputpath, input);
        bool parentIsFile = false;
        bool parentIsDir = false;
        const bool parentExists = getPathProperties(outputpath, &parentIsFile, &parentIsDir);
        if (!parentExists) {
            // Parent is missing too: use the current working directory instead.
            printf("Warning: Parent directory `%s` does not exist. Using current working directory for output.\n", outputpath);
            outputpath[0] = '\0';
        } else if (!parentIsDir || parentIsFile) {
            // The parent path exists but cannot be used as a directory: abort.
            printf("Error: outputpath `%s` exists, but it is not a directory.\n", outputpath);
            exit(EXIT_FAILURE);
        } else {
            // Parent exists and is a directory; warn that it is being used.
            printf("Warning: `%s` does not exist using parent directory for output.\n", input);
        }
    }
    printf("Initial states: %s\n", inputfile[0] != '\0' ? inputfile : "(none)");
    printf("Output dir: %s\n", outputpath[0] != '\0' ? outputpath : "(cwd)");
}
/** getOutputXMLFrequency
 * Determine how often (in iterations) the simulation state is written to XML.
 * @param argc argument count from main
 * @param argv argument vector from main (argv[4] optionally overrides the default)
 * @return 0 when XML output is disabled (always the case for visualisation builds),
 *         otherwise the requested or compile-time default frequency.
 */
int getOutputXMLFrequency(int argc, char**argv){
#ifdef VISUALISATION
    // Visualisation builds never write XML output.
    return 0;
#else
    // Start from the compile-time default.
    int outputFrequency = OUTPUT_TO_XML;
    // In console mode, the 4th argument (if present) overrides the default.
    if (argc >= 5){
        const int requested = atoi(argv[4]);
        // Zero, negative, or unparseable values all disable output.
        outputFrequency = (requested > 0) ? requested : 0;
    }
    return outputFrequency;
#endif
}
/** initCUDA
 * Selects and initialises the CUDA device to be used for the simulation.
 * The device id is taken from the command line (argv[2] for visualisation
 * builds, argv[3] otherwise), defaulting to device 0. Aborts the process
 * if no CUDA device is available or the requested id is invalid.
 * @param argc argument count from main
 * @param argv argument vector from main
 */
void initCUDA(int argc, char** argv){
    PROFILE_SCOPED_RANGE("initCUDA");
    cudaError_t cudaStatus;
    int device;
    int device_count;
    // Default device
    device = 0;
    cudaStatus = cudaGetDeviceCount(&device_count);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Error finding CUDA devices!  Do you have a CUDA-capable GPU installed?\n");
        exit(EXIT_FAILURE);
    }
    if (device_count == 0){
        fprintf(stderr, "Error no CUDA devices found!\n");
        exit(EXIT_FAILURE);
    }
#ifdef VISUALISATION
    if (argc >= 3){
        device = atoi(argv[2]);
    }
#else
    if (argc >= 4){
        device = atoi(argv[3]);
    }
#endif
    // Reject out-of-range ids, including negative values which atoi can
    // produce from malformed input (previously only the upper bound was checked).
    if (device < 0 || device >= device_count){
        fprintf(stderr, "Error selecting CUDA device! Device id '%d' is not found?\n", device);
        exit(EXIT_FAILURE);
    }
    // Select device
    cudaStatus = cudaSetDevice(device);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Error setting CUDA device!\n");
        exit(EXIT_FAILURE);
    }
    // Get device properties, for reporting which GPU is in use.
    cudaDeviceProp props;
    cudaStatus = cudaGetDeviceProperties(&props, device);
    if(cudaStatus == cudaSuccess){
#ifdef _MSC_VER
        const char * driverMode = props.tccDriver ? "TCC" : "WDDM";
#else
        const char * driverMode = "Linux";
#endif
        fprintf(stdout, "GPU %d: %s, SM%d%d, %s, pciBusId %d\n", device, props.name, props.major, props.minor, driverMode, props.pciBusID);
    } else {
        fprintf(stderr, "Error Accessing Cuda Device properties for GPU %d\n", device);
    }
    // Force context creation now, so later CUDA calls do not pay the cost.
    cudaFree(0);
}
/** runConsoleWithoutXMLOutput
 * Runs the simulation for the requested number of iterations, without any XML output.
 * @param iterations number of simulation steps to execute
 */
void runConsoleWithoutXMLOutput(int iterations){
    PROFILE_SCOPED_RANGE("runConsoleWithoutXMLOutput");
    // Execute the requested number of simulation steps (reported 1-based).
    for (int step = 1; step <= iterations; step++)
    {
        printf("Processing Simulation Step %i\n", step);
        // Advance the simulation by a single iteration.
        singleIteration();
    }
}
/** saveIterationXML
 * Private helper: writes the full simulation state (every agent list) for the
 * given iteration to XML in the global output directory, then reports to stdout.
 * Extracted so the enormous saveIterationData() argument list appears once
 * instead of being duplicated for the in-loop and final-iteration saves.
 * @param iteration the (1-based) iteration number being saved
 */
static void saveIterationXML(int iteration) {
    saveIterationData(outputpath, iteration,
        get_host_Person_default_agents(), get_device_Person_default_agents(), get_agent_Person_default_count(),
        get_host_Person_s2_agents(), get_device_Person_s2_agents(), get_agent_Person_s2_count(),
        get_host_TBAssignment_tbdefault_agents(), get_device_TBAssignment_tbdefault_agents(), get_agent_TBAssignment_tbdefault_count(),
        get_host_Household_hhdefault_agents(), get_device_Household_hhdefault_agents(), get_agent_Household_hhdefault_count(),
        get_host_HouseholdMembership_hhmembershipdefault_agents(), get_device_HouseholdMembership_hhmembershipdefault_agents(), get_agent_HouseholdMembership_hhmembershipdefault_count(),
        get_host_Church_chudefault_agents(), get_device_Church_chudefault_agents(), get_agent_Church_chudefault_count(),
        get_host_ChurchMembership_chumembershipdefault_agents(), get_device_ChurchMembership_chumembershipdefault_agents(), get_agent_ChurchMembership_chumembershipdefault_count(),
        get_host_Transport_trdefault_agents(), get_device_Transport_trdefault_agents(), get_agent_Transport_trdefault_count(),
        get_host_TransportMembership_trmembershipdefault_agents(), get_device_TransportMembership_trmembershipdefault_agents(), get_agent_TransportMembership_trmembershipdefault_count(),
        get_host_Clinic_cldefault_agents(), get_device_Clinic_cldefault_agents(), get_agent_Clinic_cldefault_count(),
        get_host_Workplace_wpdefault_agents(), get_device_Workplace_wpdefault_agents(), get_agent_Workplace_wpdefault_count(),
        get_host_WorkplaceMembership_wpmembershipdefault_agents(), get_device_WorkplaceMembership_wpmembershipdefault_agents(), get_agent_WorkplaceMembership_wpmembershipdefault_count(),
        get_host_Bar_bdefault_agents(), get_device_Bar_bdefault_agents(), get_agent_Bar_bdefault_count(),
        get_host_School_schdefault_agents(), get_device_School_schdefault_agents(), get_agent_School_schdefault_count(),
        get_host_SchoolMembership_schmembershipdefault_agents(), get_device_SchoolMembership_schmembershipdefault_agents(),
        get_agent_SchoolMembership_schmembershipdefault_count());
    printf("Iteration %i Saved to XML\n", iteration);
}
/** runConsoleWithXMLOutput
 * Runs the simulation for the requested number of iterations, saving the state
 * to XML every outputFrequency iterations. The final iteration is always saved,
 * even when it does not fall on the output frequency.
 * @param iterations number of simulation steps to execute
 * @param outputFrequency save every N-th iteration; caller guarantees this is > 0
 */
void runConsoleWithXMLOutput(int iterations, int outputFrequency){
    PROFILE_SCOPED_RANGE("runConsoleWithXMLOutput");
    // Iteratively run the correct number of iterations.
    for (int i=0; i< iterations; i++)
    {
        printf("Processing Simulation Step %i\n", i+1);
        // Single simulation iteration.
        singleIteration();
        // Save the iteration data to disk at the requested frequency.
        if((i+1) % outputFrequency == 0){
            saveIterationXML(i+1);
        }
    }
    // If we did not yet output the final iteration, output the final iteration.
    if(iterations % outputFrequency != 0){
        saveIterationXML(iterations);
    }
}
/**
 * Program main (Handles arguments)
 * Parses the command line, initialises CUDA and the simulation, runs the main
 * loop (console or visualisation build), then cleans up and resets the device.
 * @return EXIT_SUCCESS, or EXIT_FAILURE on bad arguments / device errors.
 */
int main( int argc, char** argv)
{
    cudaError_t cudaStatus;
    // Check usage mode; abort on invalid arguments.
    if (!checkUsage(argc, argv))
        exit(EXIT_FAILURE);
    // Resolve the input file and output directory paths from argv[1].
    setFilePaths(argv[1]);
    // Determine how frequently (if at all) state is written to XML.
    int outputXMLFrequency = getOutputXMLFrequency(argc, argv);
    // Initialise CUDA (device selection and context creation).
    initCUDA(argc, argv);
#ifdef VISUALISATION
    // Init visualisation must be done before simulation init
    initVisualisation();
#endif
    // Initialise the simulation from the initial states file (if any).
    initialise(inputfile);
#ifdef VISUALISATION
    runVisualisation();
    exit(EXIT_SUCCESS);
#else
    // Benchmark the simulation using CUDA events.
    cudaEvent_t start, stop;
    float milliseconds = 0;
    // Create timing events.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Get the number of iterations from the command line.
    int iterations = atoi(argv[2]);
    if (iterations <= 0)
    {
        printf("Second argument must be a positive integer (Number of Iterations)\n");
        exit(EXIT_FAILURE);
    }
    // Start timing.
    cudaEventRecord(start);
    // Launch the main loop with / without xml output.
    if(outputXMLFrequency > 0){
        runConsoleWithXMLOutput(iterations, outputXMLFrequency);
    } else {
        runConsoleWithoutXMLOutput(iterations);
    }
    // CUDA stop timing.
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf( "Total Processing time: %f (ms)\n", milliseconds);
    // Destroy the timing events (previously they were leaked until device reset).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
#endif
    cleanup();
    PROFILE_PUSH_RANGE("cudaDeviceReset");
    cudaStatus = cudaDeviceReset();
    PROFILE_POP_RANGE();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Error resetting the device!\n");
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
|
f129b67e2f5b1871288eaaba83d420ed00dd2114.hip | // !!! This is a file automatically generated by hipify!!!
#include "flamegpu/gpu/CUDAScatter.cuh"
#include <hip/hip_runtime.h>
#include <vector>
#include <cassert>
#include "flamegpu/gpu/detail/CUDAErrorChecking.cuh"
#include "flamegpu/gpu/CUDAFatAgentStateList.h"
#ifdef _MSC_VER
#pragma warning(push, 1)
#pragma warning(disable : 4706 4834)
#include <hipcub/hipcub.hpp>
#pragma warning(pop)
#else
#include <hipcub/hipcub.hpp>
#endif
namespace flamegpu {
// @todo - Make _async variants of functions which launch kernels. This can be called by the non async version and immediately sync.
// Construct an empty per-stream scratch resource: no device buffer is
// allocated until resize() is first called.
CUDAScatter::StreamData::StreamData()
    : d_data(nullptr), data_len(0) { }
// Releases the device-side ScatterData scratch buffer, if one was allocated.
CUDAScatter::StreamData::~StreamData() {
    /* @note - Do not clear cuda memory in the destructor of singletons.
     This is because order of static destruction in c++ is undefined
     So the cuda driver is not guaranteed to still exist when the static is destroyed.
     As this is only ever destroyed at exit time, it's not a real memory leak either.
     (See purge(), which drops the pointer without freeing, for that path.)
    */
    if (d_data) {
        gpuErrchk(hipFree(d_data));
    }
    d_data = nullptr;
    data_len = 0;
}
// Drops the device buffer reference WITHOUT freeing it.
// NOTE(review): presumably called when the underlying device context has been
// (or is about to be) torn down, so freeing would be invalid — confirm with
// the CUDAScatter::purge() call sites.
void CUDAScatter::StreamData::purge() {
    d_data = nullptr;
    data_len = 0;
}
// Ensures the device buffer can hold at least newLen ScatterData entries.
// Grow-only: an already-sufficient allocation is kept. Growing discards any
// previous contents (callers re-fill the buffer before each use).
void CUDAScatter::StreamData::resize(const unsigned int &newLen) {
    if (newLen <= data_len)
        return;  // Current allocation already big enough.
    if (d_data) {
        gpuErrchk(hipFree(d_data));
    }
    gpuErrchk(hipMalloc(&d_data, newLen * sizeof(ScatterData)));
    data_len = newLen;
}
// Drops references to all device allocations owned by this scatter instance
// (per-stream scratch buffers and the scan helper) without freeing them.
void CUDAScatter::purge() {
    for (auto &resource : streamResources)
        resource.purge();
    scan.purge();
}
/**
 * Generic scatter kernel: compacts flagged items into the output buffers.
 * @tparam T iterator/pointer type over the scan flags (e.g. unsigned int*, or the
 *           InversionIterator used when the flag sense is inverted)
 * @param threadCount one thread per input item (scatter_all items + scanned items)
 * @param scan_flag per-item flag; items whose flag == 1 are written out
 * @param position exclusive scan of scan_flag, giving each flagged item its compact output slot
 * @param scatter_data device array describing each variable's in/out buffers and element byte size
 * @param scatter_len number of entries in scatter_data
 * @param out_index_offset number of items already present in the output buffers
 * @param scatter_all_count the first scatter_all_count items are copied unconditionally;
 *        flag/position arrays are indexed relative to the items after them
 */
template <typename T>
__global__ void scatter_generic(
    unsigned int threadCount,
    T scan_flag,
    unsigned int *position,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len,
    const unsigned int out_index_offset = 0,
    const unsigned int scatter_all_count = 0) {
    // global thread index
    int index = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (index >= threadCount) return;
    // Unconditional items write always; the rest only when flagged.
    // (The short-circuit keeps scan_flag's index non-negative for the unconditional range.)
    if (index < scatter_all_count || scan_flag[index - scatter_all_count] == 1) {
        // Unconditional items keep their index; flagged items take their scanned slot after them.
        int output_index = index < scatter_all_count ? index : scatter_all_count + position[index - scatter_all_count];
        // Byte-wise copy of every variable for this item, strided by the variable's element size.
        for (unsigned int i = 0; i < scatter_len; ++i) {
            memcpy(scatter_data[i].out + ((out_index_offset + output_index) * scatter_data[i].typeLen), scatter_data[i].in + (index * scatter_data[i].typeLen), scatter_data[i].typeLen);
        }
    }
}
/**
 * Gather-style kernel: output item i is filled from input item position[i].
 * @param threadCount one thread per output item
 * @param position for each output slot, the index of the source item
 * @param scatter_data device array describing each variable's in/out buffers and element byte size
 * @param scatter_len number of entries in scatter_data
 */
__global__ void scatter_position_generic(
    unsigned int threadCount,
    unsigned int *position,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len) {
    // Global thread index doubles as the output index.
    const int out_index = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (out_index >= threadCount) return;
    // Look up which input item lands in this output slot.
    const int in_index = position[out_index];
    // Byte-wise copy of every variable for this item.
    for (unsigned int i = 0; i < scatter_len; ++i) {
        const unsigned int len = scatter_data[i].typeLen;
        memcpy(scatter_data[i].out + (out_index * len), scatter_data[i].in + (in_index * len), len);
    }
}
/**
 * Unconditional scatter kernel: copies every input item straight to the output,
 * optionally offset past items already present in the output buffers.
 * @param threadCount one thread per item
 * @param scatter_data device array describing each variable's in/out buffers and element byte size
 * @param scatter_len number of entries in scatter_data
 * @param out_index_offset number of items already present in the output buffers
 */
__global__ void scatter_all_generic(
    unsigned int threadCount,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len,
    const unsigned int out_index_offset = 0) {
    // Global thread index doubles as the item index.
    const int item = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (item >= threadCount) return;
    // Byte-wise copy of every variable for this item.
    for (unsigned int i = 0; i < scatter_len; ++i) {
        const unsigned int len = scatter_data[i].typeLen;
        memcpy(scatter_data[i].out + ((out_index_offset + item) * len), scatter_data[i].in + (item * len), len);
    }
}
/**
 * Convenience overload: builds a ScatterData vector from a VariableMap plus
 * matching in/out pointer maps, then forwards to the ScatterData overload.
 * @return the number of items written to the output buffers
 */
unsigned int CUDAScatter::scatter(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const Type &messageOrAgent,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int &out_index_offset,
    const bool &invert_scan_flag,
    const unsigned int &scatter_all_count) {
    // One ScatterData entry per variable: {bytes per element, in ptr, out ptr}.
    std::vector<ScatterData> scatterData;
    scatterData.reserve(vars.size());
    for (const auto &v : vars) {
        scatterData.push_back(ScatterData{
            v.second.type_size * v.second.elements,
            static_cast<char*>(in.at(v.first)),
            static_cast<char*>(out.at(v.first)) });
    }
    return scatter(streamResourceId, stream, messageOrAgent, scatterData, itemCount, out_index_offset, invert_scan_flag, scatter_all_count);
}
/**
 * Scatters items flagged by the prior scan into compact output buffers.
 * Copies the ScatterData descriptors to per-stream device scratch, launches
 * scatter_generic (optionally reading the scan flags through an inverting
 * iterator), then reads back the compacted item count and synchronises the stream.
 * @return the number of items written (scanned survivors + scatter_all_count)
 */
unsigned int CUDAScatter::scatter(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const Type &messageOrAgent,
    const std::vector<ScatterData> &sd,
    const unsigned int &itemCount,
    const unsigned int &out_index_offset,
    const bool &invert_scan_flag,
    const unsigned int &scatter_all_count) {
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve the  // maximum occupancy for a full device  // launch
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_generic<unsigned int*>, 0, itemCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (itemCount + blockSize - 1) / blockSize;
    // Make sure we have enough space to store scatterdata
    streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    if (invert_scan_flag) {
        // Read flags through InversionIterator, so 0-flagged items are the survivors.
        hipLaunchKernelGGL(( scatter_generic) , dim3(gridSize), dim3(blockSize), 0, stream, 
            itemCount,
            InversionIterator(scan.Config(messageOrAgent, streamResourceId).d_ptrs.scan_flag),
            scan.Config(messageOrAgent, streamResourceId).d_ptrs.position,
            streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
            out_index_offset, scatter_all_count);
    } else {
        hipLaunchKernelGGL(( scatter_generic) , dim3(gridSize), dim3(blockSize), 0, stream, 
            itemCount,
            scan.Config(messageOrAgent, streamResourceId).d_ptrs.scan_flag,
            scan.Config(messageOrAgent, streamResourceId).d_ptrs.position,
            streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
            out_index_offset, scatter_all_count);
    }
    gpuErrchkLaunch();
    // Update count of live agents: read the scan output one past the last scanned
    // item (presumably the exclusive-scan total — the scan buffer must be sized itemCount+1).
    unsigned int rtn = 0;
    gpuErrchk(hipMemcpyAsync(&rtn, scan.Config(messageOrAgent, streamResourceId).d_ptrs.position + itemCount - scatter_all_count, sizeof(unsigned int), hipMemcpyDeviceToHost, stream));
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
    return rtn + scatter_all_count;
}
/**
 * Gathers data into position order: output item i is filled from input item
 * position[i] of the scan config (see scatter_position_generic).
 * Copies the ScatterData descriptors to per-stream device scratch, launches the
 * kernel on the provided stream, and synchronises the stream before returning.
 */
void CUDAScatter::scatterPosition(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const Type &messageOrAgent,
    const std::vector<ScatterData> &sd,
    const unsigned int &itemCount) {
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve the  // maximum occupancy for a full device  // launch
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_position_generic, 0, itemCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (itemCount + blockSize - 1) / blockSize;
    // Make sure we have enough space to store scatterdata
    streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL(( scatter_position_generic) , dim3(gridSize), dim3(blockSize), 0, stream, 
        itemCount,
        scan.Config(messageOrAgent, streamResourceId).d_ptrs.position,
        streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()));
    gpuErrchkLaunch();
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Returns the result of the previous scan without performing a scatter:
 * reads back the scan total (the element one past the last scanned item).
 * @return the count of flagged items from the prior scan
 */
unsigned int CUDAScatter::scatterCount(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const Type &messageOrAgent,
    const unsigned int &itemCount,
    const unsigned int &scatter_all_count) {
    unsigned int rtn = 0;
    // Copy on the caller's stream and synchronise it explicitly, consistent with
    // scatter()/scatterAll(). Previously the `stream` parameter was ignored and a
    // synchronous default-stream hipMemcpy was used.
    gpuErrchk(hipMemcpyAsync(&rtn, scan.Config(messageOrAgent, streamResourceId).d_ptrs.position + itemCount - scatter_all_count, sizeof(unsigned int), hipMemcpyDeviceToHost, stream));
    gpuErrchk(hipStreamSynchronize(stream));
    return rtn;
}
/**
 * Copies every input item to the output buffers unconditionally (no scan flags),
 * optionally offset past items already present in the output.
 * Synchronises the provided stream before returning.
 * @return itemCount (the number of items written)
 */
unsigned int CUDAScatter::scatterAll(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const std::vector<ScatterData> &sd,
    const unsigned int &itemCount,
    const unsigned int &out_index_offset) {
    if (!itemCount)
        return itemCount;  // No work to do
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve the  // maximum occupancy for a full device  // launch
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_all_generic, 0, itemCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (itemCount + blockSize - 1) / blockSize;
    // Make sure we have enough space to store the scatter descriptors on device.
    streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL(( scatter_all_generic) , dim3(gridSize), dim3(blockSize), 0, stream, 
        itemCount,
        streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
        out_index_offset);
    gpuErrchkLaunch();
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
    // Update count of live agents
    return itemCount;
}
/**
 * Convenience overload: builds a ScatterData vector from a VariableMap plus
 * matching in/out pointer maps, then forwards to the ScatterData overload.
 * @return the number of items written (always itemCount)
 */
unsigned int CUDAScatter::scatterAll(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int &out_index_offset) {
    // One ScatterData entry per variable: {bytes per element, in ptr, out ptr}.
    std::vector<ScatterData> scatterData;
    scatterData.reserve(vars.size());
    for (const auto &v : vars) {
        scatterData.push_back(ScatterData{
            v.second.type_size * v.second.elements,
            static_cast<char*>(in.at(v.first)),
            static_cast<char*>(out.at(v.first)) });
    }
    return scatterAll(streamResourceId, stream, scatterData, itemCount, out_index_offset);
}
/**
 * Reorders messages into PBM order: each message's sorted destination is
 * pbm[bin_index] + bin_sub_index (bin start offset plus rank within the bin).
 * @param threadCount one thread per message
 * @param bin_index per-message bin index
 * @param bin_sub_index per-message rank within its bin
 * @param pbm start offset of each bin within the sorted output
 * @param scatter_data device array describing each variable's in/out buffers and element byte size
 * @param scatter_len number of entries in scatter_data
 */
__global__ void pbm_reorder_generic(
    const unsigned int threadCount,
    const unsigned int * __restrict__ bin_index,
    const unsigned int * __restrict__ bin_sub_index,
    const unsigned int * __restrict__ pbm,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len) {
    // Global thread index doubles as the unsorted message index.
    const int msg = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (msg >= threadCount) return;
    // Destination slot: bin start plus rank within the bin.
    const unsigned int sorted_index = pbm[bin_index[msg]] + bin_sub_index[msg];
    // Byte-wise copy of every variable for this message.
    for (unsigned int i = 0; i < scatter_len; ++i) {
        const unsigned int len = scatter_data[i].typeLen;
        memcpy(scatter_data[i].out + (sorted_index * len), scatter_data[i].in + (msg * len), len);
    }
}
/**
 * Reorders message variables into PBM order on the provided stream.
 * Builds one ScatterData entry per variable, copies the descriptors to
 * per-stream device scratch, then launches pbm_reorder_generic.
 * Synchronises the stream before returning.
 * @param d_bin_index per-message bin index (device pointer)
 * @param d_bin_sub_index per-message rank within its bin (device pointer)
 * @param d_pbm start offset of each bin within the sorted output (device pointer)
 */
void CUDAScatter::pbm_reorder(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int *d_bin_index,
    const unsigned int *d_bin_sub_index,
    const unsigned int *d_pbm) {
    // If itemCount is 0, then there is no work to be done.
    if (itemCount == 0) {
        return;
    }
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve the  // maximum occupancy for a full device  // launch
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pbm_reorder_generic, 0, itemCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (itemCount + blockSize - 1) / blockSize;
    // for each variable, scatter from swap to regular
    std::vector<ScatterData> sd;
    for (const auto &v : vars) {
        char *in_p = reinterpret_cast<char*>(in.at(v.first));
        char *out_p = reinterpret_cast<char*>(out.at(v.first));
        sd.push_back({ v.second.type_size * v.second.elements, in_p, out_p });
    }
    streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL(( pbm_reorder_generic) , dim3(gridSize), dim3(blockSize), 0, stream, 
        itemCount,
        d_bin_index,
        d_bin_sub_index,
        d_pbm,
        streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()));
    gpuErrchkLaunch();
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Scatter kernel for host agent creation
 * Input data is stored in AoS, and translated to SoA for device
 * One thread handles ONE VARIABLE of ONE AGENT (threadCount = agents * variables).
 * NOTE(review): scatter_data[var].in is assumed to already point at this
 * variable's field offset within the first AoS record, so stepping by
 * agent_size lands on the same field of each subsequent agent — confirm
 * against the ScatterData construction in the caller.
 * @param threadCount Total number of threads required
 * @param agent_size The total size of an agent's variables in memory, for stepping through input array
 * @param scatter_data Scatter data array location in memory
 * @param scatter_len Length of scatter data array
 * @param out_index_offset The number of agents already in the output array (so that they are not overwritten)
 */
__global__ void scatter_new_agents(
    const unsigned int threadCount,
    const unsigned int agent_size,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len,
    const unsigned int out_index_offset) {
    // global thread index
    int index = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (index >= threadCount) return;
    // Which variable are we outputting, and for which agent.
    const unsigned int var_out = index % scatter_len;
    const unsigned int agent_index = index / scatter_len;
    // Input: AoS — step by the whole agent record size.
    char * const in_ptr = scatter_data[var_out].in + (agent_index * agent_size);
    // Output: SoA — step by this variable's own element size.
    char * const out_ptr = scatter_data[var_out].out + ((out_index_offset + agent_index) * scatter_data[var_out].typeLen);
    memcpy(out_ptr, in_ptr, scatter_data[var_out].typeLen);
}
/**
 * Scatters host-created agents (AoS layout) into the device SoA buffers.
 * Launches one thread per (agent, variable) pair via scatter_new_agents.
 * Synchronises the provided stream before returning.
 * @param totalAgentSize byte size of one complete agent record in the AoS input
 * @param inCount number of new agents to scatter
 * @param outIndexOffset number of agents already present in the output buffers
 */
void CUDAScatter::scatterNewAgents(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const std::vector<ScatterData> &sd,
    const size_t &totalAgentSize,
    const unsigned int &inCount,
    const unsigned int &outIndexOffset) {
    // 1 thread per agent variable
    const unsigned int threadCount = static_cast<unsigned int>(sd.size()) * inCount;
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve the  // maximum occupancy for a full device  // launch
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_new_agents, 0, threadCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (threadCount + blockSize - 1) / blockSize;
    // Make sure we have enough space to store the scatter descriptors on device.
    streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL(( scatter_new_agents) , dim3(gridSize), dim3(blockSize), 0, stream, 
        threadCount,
        static_cast<unsigned int>(totalAgentSize),
        streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
        outIndexOffset);
    gpuErrchkLaunch();
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Broadcast kernel for initialising agent variables to default on device
 * Input data is stored pointed directly do by scatter_data and translated to SoA for device
 * One thread handles ONE VARIABLE of ONE AGENT (threadCount = agents * variables).
 * Unlike scatter_new_agents, the input pointer does NOT advance per agent:
 * every agent slot receives a copy of the same single default value.
 * @param threadCount Total number of threads required
 * @param scatter_data Scatter data array location in memory
 * @param scatter_len Length of scatter data array
 * @param out_index_offset The number of agents already in the output array (so that they are not overwritten)
 */
__global__ void broadcastInitKernel(
    const unsigned int threadCount,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len,
    const unsigned int out_index_offset) {
    // global thread index
    int index = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (index >= threadCount) return;
    // Which variable are we outputting, and for which agent.
    const unsigned int var_out = index % scatter_len;
    const unsigned int agent_index = index / scatter_len;
    const unsigned int type_len = scatter_data[var_out].typeLen;
    // Source: the variable's single default value (broadcast to all agents).
    char * const in_ptr = scatter_data[var_out].in;
    // Destination: this agent's slot in the SoA output buffer.
    char * const out_ptr = scatter_data[var_out].out + ((out_index_offset + agent_index) * type_len);
    memcpy(out_ptr, in_ptr, type_len);
}
/**
 * Initialises the condition buffers of the provided variables to their default values.
 * Packs all defaults into one host blob, copies blob + ScatterData descriptors into
 * a single device scratch region, then broadcasts the defaults to every agent slot
 * via broadcastInitKernel. Synchronises the stream before returning.
 * @param vars variables to initialise (each provides type_size, elements, default_value, data_condition)
 * @param inCount number of agents to initialise
 * @param outIndexOffset number of agents already present in the output buffers
 */
void CUDAScatter::broadcastInit(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const std::list<std::shared_ptr<VariableBuffer>> &vars,
    const unsigned int &inCount,
    const unsigned int outIndexOffset) {
    // No variables means no work to do
    if (vars.size() == 0) return;
    // 1 thread per agent variable
    const unsigned int threadCount = static_cast<unsigned int>(vars.size()) * inCount;
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve maximum occupancy
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, broadcastInitKernel, 0, threadCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (threadCount + blockSize - 1) / blockSize;
    // Total bytes of default-value data; scratch is (crudely) sized in multiples of ScatterData.
    ptrdiff_t offset = 0;
    for (const auto &v : vars) {
        offset += v->type_size * v->elements;
    }
    streamResources[streamResourceId].resize(static_cast<unsigned int>(offset + vars.size() * sizeof(ScatterData)));
    // Build the scatter data structure and the packed default-value blob.
    // std::vector (rather than the previous raw malloc/free) so the host buffer is
    // released even if an error check below throws, and stays alive until after the
    // stream synchronise (the async H2D copies read from it).
    std::vector<ScatterData> sd;
    std::vector<char> default_data(static_cast<size_t>(offset));
    offset = 0;
    for (const auto &v : vars) {
        // Scatter data: in points at this variable's default within device scratch.
        char *in_p = reinterpret_cast<char*>(streamResources[streamResourceId].d_data) + offset;
        char *out_p = reinterpret_cast<char*>(v->data_condition);
        sd.push_back({ v->type_size * v->elements, in_p, out_p });
        // Pack this variable's default value into the host blob.
        memcpy(default_data.data() + offset, v->default_value, v->type_size * v->elements);
        // Update offset
        offset += v->type_size * v->elements;
    }
    // Defaults first; descriptors follow at d_data + offset (ScatterData-strided, so
    // offset*sizeof(ScatterData) bytes in — the crude resize above guarantees room).
    // Important that sd.size() is used here, as allocated len would exceed 2nd memcpy
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, default_data.data(), offset, hipMemcpyHostToDevice, stream));
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data + offset, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL(( broadcastInitKernel) , dim3(gridSize), dim3(blockSize), 0, stream, 
        threadCount,
        streamResources[streamResourceId].d_data + offset, static_cast<unsigned int>(sd.size()),
        outIndexOffset);
    gpuErrchkLaunch();
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Initialises a freshly-allocated SoA buffer of agent variables to their defaults.
 * Packs all defaults into one host blob, copies blob + ScatterData descriptors into
 * a single device scratch region, then broadcasts via broadcastInitKernel.
 * Synchronises the stream before returning.
 * @param vars variables to initialise (each provides type_size, elements, default_value)
 * @param d_newBuff device buffer laid out SoA: each variable occupies elements*type_size*inCount bytes
 * @param inCount number of agents to initialise
 * @param outIndexOffset number of agents already present in the output buffers
 */
void CUDAScatter::broadcastInit(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const VariableMap &vars,
    void * const d_newBuff,
    const unsigned int &inCount,
    const unsigned int outIndexOffset) {
    // 1 thread per agent variable
    const unsigned int threadCount = static_cast<unsigned int>(vars.size()) * inCount;
    // No variables or no agents means no work to do (matches the list overload's
    // early-out; previously a zero threadCount would reach a zero-size grid launch).
    if (threadCount == 0) return;
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed to achieve maximum occupancy
    int gridSize = 0;  // The actual grid size needed, based on input size
    // calculate the grid block size for main agent function
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, broadcastInitKernel, 0, threadCount));
    //! Round up according to CUDAAgent state list size
    gridSize = (threadCount + blockSize - 1) / blockSize;
    // Total bytes of default-value data; scratch is (crudely) sized in multiples of ScatterData.
    std::vector<ScatterData> sd;
    ptrdiff_t offset = 0;
    for (const auto &v : vars) {
        offset += v.second.type_size * v.second.elements;
    }
    // std::vector (rather than the previous raw malloc/free) so the host buffer is
    // released even if an error check below throws, and stays alive until after the
    // stream synchronise (the async H2D copies read from it).
    std::vector<char> default_data(static_cast<size_t>(offset));
    streamResources[streamResourceId].resize(static_cast<unsigned int>(offset + vars.size() * sizeof(ScatterData)));
    // Build scatter data structure
    offset = 0;
    char * d_var = static_cast<char*>(d_newBuff);
    for (const auto &v : vars) {
        // In this case, in is the location of the variable's default within device scratch.
        char *in_p = reinterpret_cast<char*>(streamResources[streamResourceId].d_data) + offset;
        char *out_p = d_var;
        sd.push_back({ v.second.type_size * v.second.elements, in_p, out_p });
        // Pack this variable's default value into the host blob.
        memcpy(default_data.data() + offset, v.second.default_value, v.second.type_size * v.second.elements);
        // Prep pointer for next var (SoA: each variable spans inCount elements).
        d_var += v.second.type_size * v.second.elements * inCount;
        // Update offset
        offset += v.second.type_size * v.second.elements;
    }
    // Defaults first; descriptors follow at d_data + offset (ScatterData-strided, so
    // offset*sizeof(ScatterData) bytes in — the crude resize above guarantees room).
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, default_data.data(), offset, hipMemcpyHostToDevice, stream));
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data + offset, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL(( broadcastInitKernel) , dim3(gridSize), dim3(blockSize), 0, stream, 
        threadCount,
        streamResources[streamResourceId].d_data + offset, static_cast<unsigned int>(sd.size()),
        outIndexOffset);
    gpuErrchkLaunch();
    gpuErrchk(hipStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Reorders array messages into array-index order, one thread per message.
 * Each thread looks up its destination slot in d_position and copies every
 * variable of its message to that slot. Messages whose destination index is
 * out of bounds are silently skipped here.
 * @param threadCount Number of messages to process (surplus threads exit)
 * @param array_length Length of the message array (valid indices are < this)
 * @param d_position Destination array index of each message
 * @param d_write_flag Per-slot write counters for duplicate detection (SEATBELTS builds only)
 * @param scatter_data Device array describing in/out buffers per variable
 * @param scatter_len Number of entries in scatter_data
 */
__global__ void reorder_array_messages(
const unsigned int threadCount,
const unsigned int array_length,
const unsigned int *d_position,
#if !defined(SEATBELTS) || SEATBELTS
unsigned int *d_write_flag,
#endif
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len
) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
const unsigned int output_index = d_position[index];
// Only write messages whose destination index lies within the array
if (output_index < array_length) {
for (unsigned int i = 0; i < scatter_len; ++i) {
memcpy(scatter_data[i].out + (output_index * scatter_data[i].typeLen), scatter_data[i].in + (index * scatter_data[i].typeLen), scatter_data[i].typeLen);
}
#if !defined(SEATBELTS) || SEATBELTS
// Count writes to this slot, so the host can detect write conflicts
atomicInc(d_write_flag + output_index, UINT_MAX);
#endif
}
}
/**
 * Reorders array messages from output order into array-index order.
 * The "___INDEX" variable holds each message's destination array index; all
 * other variables are scattered to that index by reorder_array_messages.
 * In SEATBELTS builds, per-slot write counters are reduced to detect multiple
 * messages written to the same array element.
 * @param streamResourceId Index of the per-stream scratch resources to use
 * @param stream HIP stream to perform the work in
 * @param vars Variable metadata for the message list (must contain "___INDEX")
 * @param in Map of input (unsorted) device buffers by variable name
 * @param out Map of output (sorted) device buffers by variable name
 * @param itemCount Number of messages output this step
 * @param array_length Length of the array message structure
 * @param d_write_flag Optional device buffer of array_length write counters;
 *        if null, the "___INDEX" output buffer is reused instead
 * @throws exception::ArrayMessageWriteConflict If itemCount > array_length,
 *         or (SEATBELTS builds) multiple messages target the same index
 */
void CUDAScatter::arrayMessageReorder(
    const unsigned int &streamResourceId,
    const hipStream_t &stream,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int &array_length,
    unsigned int *d_write_flag) {
    // If itemCount is 0, then there is no work to be done.
    if (itemCount == 0) {
        return;
    }
    if (itemCount > array_length) {
        THROW exception::ArrayMessageWriteConflict("Too many messages output for array message structure (%u > %u).\n", itemCount, array_length);
    }
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed for maximum occupancy
    int gridSize = 0;  // The actual grid size needed, based on input size
    // Calculate the launch configuration for the reorder kernel
    // (Fix: error-check this call, consistent with every other occupancy query in this file)
    gpuErrchk(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, reorder_array_messages, 0, itemCount));
    // Round up so every message has a thread
    gridSize = (itemCount + blockSize - 1) / blockSize;
    unsigned int *d_position = nullptr;
    // Build AoS -> AoS list; "___INDEX" is split out as the position buffer
    std::vector<ScatterData> sd;
    for (const auto &v : vars) {
        if (v.first != "___INDEX") {
            char *in_p = reinterpret_cast<char*>(in.at(v.first));
            char *out_p = reinterpret_cast<char*>(out.at(v.first));
            sd.push_back({ v.second.type_size * v.second.elements, in_p, out_p });
        } else {  // Special case, log index var
            d_position = reinterpret_cast<unsigned int*>(in.at(v.first));
            d_write_flag = d_write_flag ? d_write_flag : reinterpret_cast<unsigned int*>(out.at(v.first));
        }
    }
    assert(d_position);  // Not an array message, lacking ___INDEX var
    size_t t_data_len = 0;
    {  // Size the scratch buffer to serve both as cub temp storage and as the ScatterData array
        // A nullptr temp-storage pointer makes cub only report its byte requirement in t_data_len
        gpuErrchk(hipcub::DeviceReduce::Max(nullptr, t_data_len, d_write_flag, d_position, array_length, stream));
        if (t_data_len > streamResources[streamResourceId].data_len * sizeof(ScatterData)) {
            // t_data_len is bigger than current allocation
            if (t_data_len > sd.size() * sizeof(ScatterData)) {
                // t_data_len is bigger than sd.size()
                streamResources[streamResourceId].resize(static_cast<unsigned int>((t_data_len / sizeof(ScatterData)) + 1));
            } else {
                // sd.size() is bigger
                streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
            }
        }
    }
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(hipMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), hipMemcpyHostToDevice, stream));
    hipLaunchKernelGGL((reorder_array_messages), dim3(gridSize), dim3(blockSize), 0, stream,
        itemCount, array_length,
        d_position,
#if !defined(SEATBELTS) || SEATBELTS
        d_write_flag,
#endif
        streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()));
    gpuErrchkLaunch();
#if !defined(SEATBELTS) || SEATBELTS
    // Check d_write_flag for dupes: reduce the counters; the max lands in
    // d_position[0] (safe: the kernel above has already consumed d_position, same stream)
    gpuErrchk(hipcub::DeviceReduce::Max(streamResources[streamResourceId].d_data, t_data_len, d_write_flag, d_position, array_length, stream));
    unsigned int maxBinSize = 0;
    gpuErrchk(hipMemcpyAsync(&maxBinSize, d_position, sizeof(unsigned int), hipMemcpyDeviceToHost, stream));
    gpuErrchk(hipStreamSynchronize(stream));
    if (maxBinSize > 1) {
        // Too many messages for a single element of the array
        // Report the offending indices to stderr
        unsigned int *hd_write_flag = (unsigned int *)malloc(sizeof(unsigned int) * array_length);
        gpuErrchk(hipMemcpy(hd_write_flag, d_write_flag, sizeof(unsigned int)* array_length, hipMemcpyDeviceToHost));
        for (unsigned int i = 0; i < array_length; ++i) {
            if (hd_write_flag[i] > 1)
                fprintf(stderr, "Array messagelist contains %u messages at index %u!\n", hd_write_flag[i], i);
        }
        ::free(hd_write_flag);  // Fix: previously leaked before the throw
        THROW exception::ArrayMessageWriteConflict("Multiple threads output array messages to the same index, see stderr.\n");
    }
#endif
}
} // namespace flamegpu
| f129b67e2f5b1871288eaaba83d420ed00dd2114.cu | #include "flamegpu/gpu/CUDAScatter.cuh"
#include <cuda_runtime.h>
#include <vector>
#include <cassert>
#include "flamegpu/gpu/detail/CUDAErrorChecking.cuh"
#include "flamegpu/gpu/CUDAFatAgentStateList.h"
#ifdef _MSC_VER
#pragma warning(push, 1)
#pragma warning(disable : 4706 4834)
#include <cub/cub.cuh>
#pragma warning(pop)
#else
#include <cub/cub.cuh>
#endif
namespace flamegpu {
// @todo - Make _async variants of functions which launch kernels. This can be called by the non async version and immediately sync.
// Stream-local scratch storage starts empty; device memory is allocated
// lazily on the first call to resize().
CUDAScatter::StreamData::StreamData() {
    d_data = nullptr;
    data_len = 0;
}
// Releases the device scratch buffer, if one was ever allocated by resize().
CUDAScatter::StreamData::~StreamData() {
/* @note - Do not clear cuda memory in the destructor of singletons.
This is because order of static destruction in c++ is undefined
So the cuda driver is not guaranteed to still exist when the static is destroyed.
As this is only ever destroyed at exit time, it's not a real memory leak either.
*/
if (d_data) {
gpuErrchk(cudaFree(d_data));
}
d_data = nullptr;
data_len = 0;
}
// Forget the device scratch allocation WITHOUT freeing it.
// Intended for use once the CUDA context is already gone, so the pointer no
// longer refers to live memory (see the destructor's note on static teardown).
void CUDAScatter::StreamData::purge() {
    data_len = 0;
    d_data = nullptr;
}
// Grows the device scratch buffer to hold at least newLen ScatterData-sized
// units. Grow-only: a smaller request leaves the current allocation intact.
// NOTE: growing frees the old buffer first, so existing device contents are
// NOT preserved across a grow.
void CUDAScatter::StreamData::resize(const unsigned int &newLen) {
if (newLen > data_len) {
if (d_data) {
gpuErrchk(cudaFree(d_data));
}
// newLen is a count of ScatterData units, not bytes
gpuErrchk(cudaMalloc(&d_data, newLen * sizeof(ScatterData)));
data_len = newLen;
}
}
// Forget all device allocations held by this CUDAScatter (each per-stream
// scratch buffer and the scan helper) without freeing them; used when the
// CUDA context has already been destroyed.
void CUDAScatter::purge() {
    for (auto &resource : streamResources) {
        resource.purge();
    }
    scan.purge();
}
/**
 * Generic compaction-scatter kernel, one thread per input item.
 * The first scatter_all_count items are copied unconditionally to their own
 * index; each remaining item is copied only if its scan flag equals 1, to the
 * compacted position recorded for it.
 * @tparam T Scan-flag accessor type: a raw pointer, or a wrapper such as the
 *           InversionIterator used when invert_scan_flag is set at the launch site
 * @param threadCount Number of input items (surplus threads exit)
 * @param scan_flag Per-item flag, indexed from scatter_all_count onwards
 * @param position Compacted output position per flagged item (presumably the
 *        exclusive scan of the flags -- confirm against the scan helper)
 * @param scatter_data Device array describing in/out buffers per variable
 * @param scatter_len Number of entries in scatter_data
 * @param out_index_offset Base offset applied to every output index
 * @param scatter_all_count Leading items copied regardless of their flag
 */
template <typename T>
__global__ void scatter_generic(
unsigned int threadCount,
T scan_flag,
unsigned int *position,
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len,
const unsigned int out_index_offset = 0,
const unsigned int scatter_all_count = 0) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
// if optional message is to be written
if (index < scatter_all_count || scan_flag[index - scatter_all_count] == 1) {
int output_index = index < scatter_all_count ? index : scatter_all_count + position[index - scatter_all_count];
// Copy every variable of this item, byte-wise
for (unsigned int i = 0; i < scatter_len; ++i) {
memcpy(scatter_data[i].out + ((out_index_offset + output_index) * scatter_data[i].typeLen), scatter_data[i].in + (index * scatter_data[i].typeLen), scatter_data[i].typeLen);
}
}
}
/**
 * Gather kernel: unlike scatter_generic, 'position' here holds INPUT indices.
 * Every output slot index reads its source item from position[index]; all
 * items are copied unconditionally.
 * @param threadCount Number of output items (surplus threads exit)
 * @param position For each output index, the input index to read from
 * @param scatter_data Device array describing in/out buffers per variable
 * @param scatter_len Number of entries in scatter_data
 */
__global__ void scatter_position_generic(
unsigned int threadCount,
unsigned int *position,
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
// Source item for this output slot
int input_index = position[index];
for (unsigned int i = 0; i < scatter_len; ++i) {
memcpy(scatter_data[i].out + (index * scatter_data[i].typeLen), scatter_data[i].in + (input_index * scatter_data[i].typeLen), scatter_data[i].typeLen);
}
}
/**
 * Copies every input item to the output buffers unconditionally (no scan
 * flag). One thread per item; each thread copies that item's bytes for all
 * variables in scatter_data.
 * @param threadCount Number of items to copy (surplus threads exit)
 * @param scatter_data Device array describing in/out buffers per variable
 * @param scatter_len Number of entries in scatter_data
 * @param out_index_offset Base offset applied to every output index
 */
__global__ void scatter_all_generic(
    unsigned int threadCount,
    CUDAScatter::ScatterData *scatter_data,
    const unsigned int scatter_len,
    const unsigned int out_index_offset = 0) {
    // Flat global thread index; threads beyond the item count do nothing
    const unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= threadCount) return;
    for (unsigned int v = 0; v < scatter_len; ++v) {
        const auto len = scatter_data[v].typeLen;
        char *dst = scatter_data[v].out + ((out_index_offset + tid) * len);
        const char *src = scatter_data[v].in + (tid * len);
        memcpy(dst, src, len);
    }
}
/**
 * Convenience overload: translates the named-variable maps into raw
 * ScatterData triples, then delegates to the ScatterData overload.
 * @return Total number of items scattered (flagged items + scatter_all_count)
 */
unsigned int CUDAScatter::scatter(
    const unsigned int &streamResourceId,
    const cudaStream_t &stream,
    const Type &messageOrAgent,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int &out_index_offset,
    const bool &invert_scan_flag,
    const unsigned int &scatter_all_count) {
    std::vector<ScatterData> scatterData;
    scatterData.reserve(vars.size());
    for (const auto &var : vars) {
        scatterData.push_back({
            var.second.type_size * var.second.elements,   // bytes per item
            static_cast<char*>(in.at(var.first)),         // source buffer
            static_cast<char*>(out.at(var.first)) });     // destination buffer
    }
    return scatter(streamResourceId, stream, messageOrAgent, scatterData, itemCount, out_index_offset, invert_scan_flag, scatter_all_count);
}
/**
 * Performs a scan-flag driven compaction-scatter of the buffers described by
 * sd, using the scan configuration for (messageOrAgent, streamResourceId).
 * The first scatter_all_count items are always copied; remaining items are
 * copied according to their scan flag (optionally inverted).
 * Synchronises the stream before returning.
 * @return Number of items scattered, including scatter_all_count
 */
unsigned int CUDAScatter::scatter(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const Type &messageOrAgent,
const std::vector<ScatterData> &sd,
const unsigned int &itemCount,
const unsigned int &out_index_offset,
const bool &invert_scan_flag,
const unsigned int &scatter_all_count) {
int blockSize = 0;  // The launch configurator returned block size
int minGridSize = 0;  // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch
int gridSize = 0;  // The actual grid size needed, based on input size
// calculate the grid block size for main agent function
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_generic<unsigned int*>, 0, itemCount));
//! Round up according to CUDAAgent state list size
gridSize = (itemCount + blockSize - 1) / blockSize;
// Make sure we have enough space to store scatterdata
streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
// Important that sd.size() is still used here, incase allocated len (data_len) is bigger
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
if (invert_scan_flag) {
// InversionIterator wraps the flag buffer so 0 reads as set and vice versa
scatter_generic <<<gridSize, blockSize, 0, stream>>> (
itemCount,
InversionIterator(scan.Config(messageOrAgent, streamResourceId).d_ptrs.scan_flag),
scan.Config(messageOrAgent, streamResourceId).d_ptrs.position,
streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
out_index_offset, scatter_all_count);
} else {
scatter_generic <<<gridSize, blockSize, 0, stream>>> (
itemCount,
scan.Config(messageOrAgent, streamResourceId).d_ptrs.scan_flag,
scan.Config(messageOrAgent, streamResourceId).d_ptrs.position,
streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
out_index_offset, scatter_all_count);
}
gpuErrchkLaunch();
// Update count of live agents: read the scan total from the position buffer
unsigned int rtn = 0;
gpuErrchk(cudaMemcpyAsync(&rtn, scan.Config(messageOrAgent, streamResourceId).d_ptrs.position + itemCount - scatter_all_count, sizeof(unsigned int), cudaMemcpyDeviceToHost, stream));
gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
return rtn + scatter_all_count;
}
/**
 * Gathers the buffers described by sd according to the scan 'position'
 * buffer: output slot i is filled from input index position[i]
 * (see scatter_position_generic). Synchronises the stream before returning.
 * @param streamResourceId Index of the per-stream scratch resources to use
 * @param stream CUDA stream to perform the work in
 * @param messageOrAgent Which scan configuration's position buffer to use
 * @param sd In/out buffer descriptions, one per variable
 * @param itemCount Number of items to gather
 */
void CUDAScatter::scatterPosition(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const Type &messageOrAgent,
const std::vector<ScatterData> &sd,
const unsigned int &itemCount) {
int blockSize = 0;  // The launch configurator returned block size
int minGridSize = 0;  // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch
int gridSize = 0;  // The actual grid size needed, based on input size
// calculate the grid block size for main agent function
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_position_generic, 0, itemCount));
//! Round up according to CUDAAgent state list size
gridSize = (itemCount + blockSize - 1) / blockSize;
// Make sure we have enough space to store scatterdata
streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
// Important that sd.size() is still used here, incase allocated len (data_len) is bigger
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
scatter_position_generic <<<gridSize, blockSize, 0, stream>>> (
itemCount,
scan.Config(messageOrAgent, streamResourceId).d_ptrs.position,
streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()));
gpuErrchkLaunch();
gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Reads back the item count produced by the most recent scan pass, from the
 * 'position' buffer at index (itemCount - scatter_all_count) -- presumably
 * the total of an exclusive scan of the flags (confirm against the scan helper).
 * NOTE(review): the 'stream' parameter is currently unused; this is a
 * blocking cudaMemcpy rather than an async copy on 'stream' -- confirm intended.
 */
unsigned int CUDAScatter::scatterCount(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const Type &messageOrAgent,
const unsigned int &itemCount,
const unsigned int &scatter_all_count) {
unsigned int rtn = 0;
gpuErrchk(cudaMemcpy(&rtn, scan.Config(messageOrAgent, streamResourceId).d_ptrs.position + itemCount - scatter_all_count, sizeof(unsigned int), cudaMemcpyDeviceToHost));
return rtn;
}
/**
 * Unconditionally copies all itemCount items of the buffers described by sd
 * to the output, offset by out_index_offset (no scan flag involved).
 * Synchronises the stream before returning.
 * @return itemCount (every item is copied)
 */
unsigned int CUDAScatter::scatterAll(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const std::vector<ScatterData> &sd,
const unsigned int &itemCount,
const unsigned int &out_index_offset) {
if (!itemCount)
return itemCount;  // No work to do
int blockSize = 0;  // The launch configurator returned block size
int minGridSize = 0;  // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch
int gridSize = 0;  // The actual grid size needed, based on input size
// calculate the grid block size for main agent function
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_all_generic, 0, itemCount));
//! Round up according to CUDAAgent state list size
gridSize = (itemCount + blockSize - 1) / blockSize;
streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
// Important that sd.size() is still used here, incase allocated len (data_len) is bigger
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
scatter_all_generic <<<gridSize, blockSize, 0, stream>>> (
itemCount,
streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
out_index_offset);
gpuErrchkLaunch();
gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
// Update count of live agents
return itemCount;
}
/**
 * Convenience overload: converts the named-variable maps into raw ScatterData
 * triples and delegates to the ScatterData overload.
 * @return itemCount (every item is copied)
 */
unsigned int CUDAScatter::scatterAll(
    const unsigned int &streamResourceId,
    const cudaStream_t &stream,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int &out_index_offset) {
    std::vector<ScatterData> scatterData;
    scatterData.reserve(vars.size());
    for (const auto &var : vars) {
        scatterData.push_back({
            var.second.type_size * var.second.elements,   // bytes per item
            static_cast<char*>(in.at(var.first)),         // source buffer
            static_cast<char*>(out.at(var.first)) });     // destination buffer
    }
    return scatterAll(streamResourceId, stream, scatterData, itemCount, out_index_offset);
}
/**
 * Reorders items into spatial-bin order, one thread per item.
 * The destination of item i is pbm[bin_index[i]] (the start of its bin in
 * the partition boundary matrix) plus bin_sub_index[i] (its rank within the bin).
 * @param threadCount Number of items (surplus threads exit)
 * @param bin_index Bin of each item
 * @param bin_sub_index Rank of each item within its bin
 * @param pbm Partition boundary matrix: start offset of each bin
 * @param scatter_data Device array describing in/out buffers per variable
 * @param scatter_len Number of entries in scatter_data
 */
__global__ void pbm_reorder_generic(
const unsigned int threadCount,
const unsigned int * __restrict__ bin_index,
const unsigned int * __restrict__ bin_sub_index,
const unsigned int * __restrict__ pbm,
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
const unsigned int sorted_index = pbm[bin_index[index]] + bin_sub_index[index];
// Copy every variable of this item to its sorted position
for (unsigned int i = 0; i < scatter_len; ++i) {
memcpy(scatter_data[i].out + (sorted_index * scatter_data[i].typeLen), scatter_data[i].in + (index * scatter_data[i].typeLen), scatter_data[i].typeLen);
}
}
/**
 * Reorders the named message variables into spatial-bin order using a
 * partition boundary matrix (see pbm_reorder_generic).
 * Synchronises the stream before returning.
 * @param streamResourceId Index of the per-stream scratch resources to use
 * @param stream CUDA stream to perform the work in
 * @param vars Variable metadata for the message list
 * @param in Map of input (unsorted) device buffers by variable name
 * @param out Map of output (sorted) device buffers by variable name
 * @param itemCount Number of messages to reorder
 * @param d_bin_index Device buffer: bin of each message
 * @param d_bin_sub_index Device buffer: rank of each message within its bin
 * @param d_pbm Device partition boundary matrix (start offset of each bin)
 */
void CUDAScatter::pbm_reorder(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const VariableMap &vars,
const std::map<std::string, void*> &in,
const std::map<std::string, void*> &out,
const unsigned int &itemCount,
const unsigned int *d_bin_index,
const unsigned int *d_bin_sub_index,
const unsigned int *d_pbm) {
// If itemCount is 0, then there is no work to be done.
if (itemCount == 0) {
return;
}
int blockSize = 0;  // The launch configurator returned block size
int minGridSize = 0;  // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch
int gridSize = 0;  // The actual grid size needed, based on input size
// calculate the grid block size for main agent function
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pbm_reorder_generic, 0, itemCount));
//! Round up according to CUDAAgent state list size
gridSize = (itemCount + blockSize - 1) / blockSize;
// for each variable, scatter from swap to regular
std::vector<ScatterData> sd;
for (const auto &v : vars) {
char *in_p = reinterpret_cast<char*>(in.at(v.first));
char *out_p = reinterpret_cast<char*>(out.at(v.first));
sd.push_back({ v.second.type_size * v.second.elements, in_p, out_p });
}
streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
// Important that sd.size() is still used here, incase allocated len (data_len) is bigger
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
pbm_reorder_generic <<<gridSize, blockSize, 0, stream>>> (
itemCount,
d_bin_index,
d_bin_sub_index,
d_pbm,
streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()));
gpuErrchkLaunch();
gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
* Scatter kernel for host agent creation
* Input data is stored in AoS, and translated to SoA for device
* @param threadCount Total number of threads required
* @param agent_size The total size of an agent's variables in memory, for stepping through input array
* @param scatter_data Scatter data array location in memory
* @param scatter_len Length of scatter data array
* @param out_index_offset The number of agents already in the output array (so that they are not overwritten)
*/
__global__ void scatter_new_agents(
const unsigned int threadCount,
const unsigned int agent_size,
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len,
const unsigned int out_index_offset) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
// One thread per (agent, variable) pair: decompose the flat index
const unsigned int var_out = index % scatter_len;
const unsigned int agent_index = index / scatter_len;
// Input is AoS: each agent's record is agent_size bytes, with this variable
// located at scatter_data[var_out].in within the record. Output is SoA.
char * const in_ptr = scatter_data[var_out].in + (agent_index * agent_size);
char * const out_ptr = scatter_data[var_out].out + ((out_index_offset + agent_index) * scatter_data[var_out].typeLen);
memcpy(out_ptr, in_ptr, scatter_data[var_out].typeLen);
}
/**
 * Scatters host-created agents from AoS layout into the device SoA state
 * lists (see scatter_new_agents). Synchronises the stream before returning.
 * @param streamResourceId Index of the per-stream scratch resources to use
 * @param stream CUDA stream to perform the work in
 * @param sd In/out buffer descriptions, one per agent variable
 * @param totalAgentSize Size in bytes of one agent's full AoS record
 * @param inCount Number of new agents to scatter
 * @param outIndexOffset Number of agents already in the output state list
 */
void CUDAScatter::scatterNewAgents(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const std::vector<ScatterData> &sd,
const size_t &totalAgentSize,
const unsigned int &inCount,
const unsigned int &outIndexOffset) {
// 1 thread per agent variable
const unsigned int threadCount = static_cast<unsigned int>(sd.size()) * inCount;
int blockSize = 0;  // The launch configurator returned block size
int minGridSize = 0;  // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch
int gridSize = 0;  // The actual grid size needed, based on input size
// calculate the grid block size for main agent function
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scatter_new_agents, 0, threadCount));
//! Round up according to CUDAAgent state list size
gridSize = (threadCount + blockSize - 1) / blockSize;
streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
// Important that sd.size() is still used here, incase allocated len (data_len) is bigger
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
scatter_new_agents <<<gridSize, blockSize, 0, stream>>> (
threadCount,
static_cast<unsigned int>(totalAgentSize),
streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()),
outIndexOffset);
gpuErrchkLaunch();
gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
* Broadcast kernel for initialising agent variables to default on device
* Input data is stored pointed directly do by scatter_data and translated to SoA for device
* @param threadCount Total number of threads required
* @param scatter_data Scatter data array location in memory
* @param scatter_len Length of scatter data array
* @param out_index_offset The number of agents already in the output array (so that they are not overwritten)
*/
__global__ void broadcastInitKernel(
const unsigned int threadCount,
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len,
const unsigned int out_index_offset) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
// One thread per (agent, variable) pair: decompose the flat index
const unsigned int var_out = index % scatter_len;
const unsigned int agent_index = index / scatter_len;
const unsigned int type_len = scatter_data[var_out].typeLen;
// 'in' points at the single staged default value for this variable; every
// agent's slot in the SoA output receives a copy of it
char * const in_ptr = scatter_data[var_out].in;
char * const out_ptr = scatter_data[var_out].out + ((out_index_offset + agent_index) * type_len);
memcpy(out_ptr, in_ptr, type_len);
}
/**
 * Initialises the given variable buffers to their default values on device.
 * Defaults are staged into the stream's scratch buffer ahead of the
 * ScatterData array, then broadcastInitKernel copies one default per agent
 * per variable. Synchronises the stream before returning.
 * NOTE(review): unlike the empty-vars case, inCount == 0 is not guarded here
 * and would produce a zero-dimension kernel launch -- confirm callers never pass it.
 */
void CUDAScatter::broadcastInit(
const unsigned int &streamResourceId,
const cudaStream_t &stream,
const std::list<std::shared_ptr<VariableBuffer>> &vars,
const unsigned int &inCount,
const unsigned int outIndexOffset) {
// No variables means no work to do
if (vars.size() == 0) return;
// 1 thread per agent variable
const unsigned int threadCount = static_cast<unsigned int>(vars.size()) * inCount;
int blockSize = 0;  // The launch configurator returned block size
int minGridSize = 0;  // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch
int gridSize = 0;  // The actual grid size needed, based on input size
// calculate the grid block size for main agent function
gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, broadcastInitKernel, 0, threadCount));
//! Round up according to CUDAAgent state list size
gridSize = (threadCount + blockSize - 1) / blockSize;
// Calculate memory usage (crudely in multiples of ScatterData)
ptrdiff_t offset = 0;
for (const auto &v : vars) {
offset += v->type_size * v->elements;
}
streamResources[streamResourceId].resize(static_cast<unsigned int>(offset + vars.size() * sizeof(ScatterData)));
// Build scatter data structure and init data
std::vector<ScatterData> sd;
char *default_data = reinterpret_cast<char*>(malloc(offset));
offset = 0;
for (const auto &v : vars) {
// Scatter data: 'in' points at this variable's staged default in the scratch buffer
char *in_p = reinterpret_cast<char*>(streamResources[streamResourceId].d_data) + offset;
char *out_p = reinterpret_cast<char*>(v->data_condition);
sd.push_back({ v->type_size * v->elements, in_p, out_p });
// Init data: stage this variable's default value on the host
memcpy(default_data + offset, v->default_value, v->type_size * v->elements);
// Update offset
offset += v->type_size * v->elements;
}
// Important that sd.size() is used here, as allocated len would exceed 2nd memcpy
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, default_data, offset, cudaMemcpyHostToDevice, stream));
gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data + offset, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
::free(default_data);
broadcastInitKernel <<<gridSize, blockSize, 0, stream>>> (
threadCount,
streamResources[streamResourceId].d_data + offset, static_cast<unsigned int>(sd.size()),
outIndexOffset);
gpuErrchkLaunch();
gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Initialises variables of a new device buffer to their default values.
 * Host-side defaults for every variable are staged into the stream's scratch
 * buffer, followed by the ScatterData array, then broadcastInitKernel copies
 * one default per agent per variable into the SoA output buffer.
 * @param streamResourceId Index of the per-stream scratch resources to use
 * @param stream CUDA stream to perform the work in
 * @param vars Metadata (type size, element count, default value) per variable
 * @param d_newBuff Device buffer holding the SoA agent data to initialise
 * @param inCount Number of agents to initialise
 * @param outIndexOffset Number of agents already present (these are skipped)
 */
void CUDAScatter::broadcastInit(
    const unsigned int &streamResourceId,
    const cudaStream_t &stream,
    const VariableMap &vars,
    void * const d_newBuff,
    const unsigned int &inCount,
    const unsigned int outIndexOffset) {
    // No variables or no agents means no work to do.
    // (Fix: consistent with the std::list overload above, and avoids
    // launching a kernel with a zero-dimension grid, which is an error.)
    if (vars.size() == 0 || inCount == 0) return;
    // 1 thread per agent variable
    const unsigned int threadCount = static_cast<unsigned int>(vars.size()) * inCount;
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed for maximum occupancy
    int gridSize = 0;  // The actual grid size needed, based on input size
    // Calculate the launch configuration for the init kernel
    gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, broadcastInitKernel, 0, threadCount));
    // Round up so every (agent, variable) pair has a thread
    gridSize = (threadCount + blockSize - 1) / blockSize;
    // Total bytes of default data; drives the (crude) scratch sizing below
    std::vector<ScatterData> sd;
    ptrdiff_t offset = 0;
    for (const auto &v : vars) {
        offset += v.second.type_size * v.second.elements;
    }
    char *default_data = reinterpret_cast<char*>(malloc(offset));
    // Scratch layout: [default values][ScatterData array]
    // resize() interprets its argument in ScatterData units, so this over-allocates (crude but safe)
    streamResources[streamResourceId].resize(static_cast<unsigned int>(offset + vars.size() * sizeof(ScatterData)));
    // Build scatter data structure
    offset = 0;
    char * d_var = static_cast<char*>(d_newBuff);
    for (const auto &v : vars) {
        // 'in' points at this variable's staged default inside the scratch buffer
        char *in_p = reinterpret_cast<char*>(streamResources[streamResourceId].d_data) + offset;
        char *out_p = d_var;
        sd.push_back({ v.second.type_size * v.second.elements, in_p, out_p });
        // Stage this variable's default value on the host
        memcpy(default_data + offset, v.second.default_value, v.second.type_size * v.second.elements);
        // Step to the start of the next variable's SoA block in the output
        d_var += v.second.type_size * v.second.elements * inCount;
        // Update offset
        offset += v.second.type_size * v.second.elements;
    }
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, default_data, offset, cudaMemcpyHostToDevice, stream));
    gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data + offset, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
    // NOTE(review): relies on async copies from pageable host memory staging
    // before return, so freeing here is safe -- per cudaMemcpyAsync semantics.
    ::free(default_data);
    broadcastInitKernel <<<gridSize, blockSize, 0, stream>>> (
        threadCount,
        streamResources[streamResourceId].d_data + offset, static_cast<unsigned int>(sd.size()),
        outIndexOffset);
    gpuErrchkLaunch();
    gpuErrchk(cudaStreamSynchronize(stream));  // @todo - async + sync variants.
}
/**
 * Reorders array messages into array-index order, one thread per message.
 * Each thread looks up its destination slot in d_position and copies every
 * variable of its message to that slot. Messages whose destination index is
 * out of bounds are silently skipped here.
 * @param threadCount Number of messages to process (surplus threads exit)
 * @param array_length Length of the message array (valid indices are < this)
 * @param d_position Destination array index of each message
 * @param d_write_flag Per-slot write counters for duplicate detection (SEATBELTS builds only)
 * @param scatter_data Device array describing in/out buffers per variable
 * @param scatter_len Number of entries in scatter_data
 */
__global__ void reorder_array_messages(
const unsigned int threadCount,
const unsigned int array_length,
const unsigned int *d_position,
#if !defined(SEATBELTS) || SEATBELTS
unsigned int *d_write_flag,
#endif
CUDAScatter::ScatterData *scatter_data,
const unsigned int scatter_len
) {
// global thread index
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= threadCount) return;
const unsigned int output_index = d_position[index];
// Only write messages whose destination index lies within the array
if (output_index < array_length) {
for (unsigned int i = 0; i < scatter_len; ++i) {
memcpy(scatter_data[i].out + (output_index * scatter_data[i].typeLen), scatter_data[i].in + (index * scatter_data[i].typeLen), scatter_data[i].typeLen);
}
#if !defined(SEATBELTS) || SEATBELTS
// Count writes to this slot, so the host can detect write conflicts
atomicInc(d_write_flag + output_index, UINT_MAX);
#endif
}
}
/**
 * Reorders array messages from output order into array-index order.
 * The "___INDEX" variable holds each message's destination array index; all
 * other variables are scattered to that index by reorder_array_messages.
 * In SEATBELTS builds, per-slot write counters are reduced to detect multiple
 * messages written to the same array element.
 * @param streamResourceId Index of the per-stream scratch resources to use
 * @param stream CUDA stream to perform the work in
 * @param vars Variable metadata for the message list (must contain "___INDEX")
 * @param in Map of input (unsorted) device buffers by variable name
 * @param out Map of output (sorted) device buffers by variable name
 * @param itemCount Number of messages output this step
 * @param array_length Length of the array message structure
 * @param d_write_flag Optional device buffer of array_length write counters;
 *        if null, the "___INDEX" output buffer is reused instead
 * @throws exception::ArrayMessageWriteConflict If itemCount > array_length,
 *         or (SEATBELTS builds) multiple messages target the same index
 */
void CUDAScatter::arrayMessageReorder(
    const unsigned int &streamResourceId,
    const cudaStream_t &stream,
    const VariableMap &vars,
    const std::map<std::string, void*> &in,
    const std::map<std::string, void*> &out,
    const unsigned int &itemCount,
    const unsigned int &array_length,
    unsigned int *d_write_flag) {
    // If itemCount is 0, then there is no work to be done.
    if (itemCount == 0) {
        return;
    }
    if (itemCount > array_length) {
        THROW exception::ArrayMessageWriteConflict("Too many messages output for array message structure (%u > %u).\n", itemCount, array_length);
    }
    int blockSize = 0;  // The launch configurator returned block size
    int minGridSize = 0;  // The minimum grid size needed for maximum occupancy
    int gridSize = 0;  // The actual grid size needed, based on input size
    // Calculate the launch configuration for the reorder kernel
    // (Fix: error-check this call, consistent with every other occupancy query in this file)
    gpuErrchk(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, reorder_array_messages, 0, itemCount));
    // Round up so every message has a thread
    gridSize = (itemCount + blockSize - 1) / blockSize;
    unsigned int *d_position = nullptr;
    // Build AoS -> AoS list; "___INDEX" is split out as the position buffer
    std::vector<ScatterData> sd;
    for (const auto &v : vars) {
        if (v.first != "___INDEX") {
            char *in_p = reinterpret_cast<char*>(in.at(v.first));
            char *out_p = reinterpret_cast<char*>(out.at(v.first));
            sd.push_back({ v.second.type_size * v.second.elements, in_p, out_p });
        } else {  // Special case, log index var
            d_position = reinterpret_cast<unsigned int*>(in.at(v.first));
            d_write_flag = d_write_flag ? d_write_flag : reinterpret_cast<unsigned int*>(out.at(v.first));
        }
    }
    assert(d_position);  // Not an array message, lacking ___INDEX var
    size_t t_data_len = 0;
    {  // Size the scratch buffer to serve both as cub temp storage and as the ScatterData array
        // A nullptr temp-storage pointer makes cub only report its byte requirement in t_data_len
        gpuErrchk(cub::DeviceReduce::Max(nullptr, t_data_len, d_write_flag, d_position, array_length, stream));
        if (t_data_len > streamResources[streamResourceId].data_len * sizeof(ScatterData)) {
            // t_data_len is bigger than current allocation
            if (t_data_len > sd.size() * sizeof(ScatterData)) {
                // t_data_len is bigger than sd.size()
                streamResources[streamResourceId].resize(static_cast<unsigned int>((t_data_len / sizeof(ScatterData)) + 1));
            } else {
                // sd.size() is bigger
                streamResources[streamResourceId].resize(static_cast<unsigned int>(sd.size()));
            }
        }
    }
    // Important that sd.size() is still used here, incase allocated len (data_len) is bigger
    gpuErrchk(cudaMemcpyAsync(streamResources[streamResourceId].d_data, sd.data(), sizeof(ScatterData) * sd.size(), cudaMemcpyHostToDevice, stream));
    reorder_array_messages <<<gridSize, blockSize, 0, stream >>> (
        itemCount, array_length,
        d_position,
#if !defined(SEATBELTS) || SEATBELTS
        d_write_flag,
#endif
        streamResources[streamResourceId].d_data, static_cast<unsigned int>(sd.size()));
    gpuErrchkLaunch();
#if !defined(SEATBELTS) || SEATBELTS
    // Check d_write_flag for dupes: reduce the counters; the max lands in
    // d_position[0] (safe: the kernel above has already consumed d_position, same stream)
    gpuErrchk(cub::DeviceReduce::Max(streamResources[streamResourceId].d_data, t_data_len, d_write_flag, d_position, array_length, stream));
    unsigned int maxBinSize = 0;
    gpuErrchk(cudaMemcpyAsync(&maxBinSize, d_position, sizeof(unsigned int), cudaMemcpyDeviceToHost, stream));
    gpuErrchk(cudaStreamSynchronize(stream));
    if (maxBinSize > 1) {
        // Too many messages for a single element of the array
        // Report the offending indices to stderr
        unsigned int *hd_write_flag = (unsigned int *)malloc(sizeof(unsigned int) * array_length);
        gpuErrchk(cudaMemcpy(hd_write_flag, d_write_flag, sizeof(unsigned int)* array_length, cudaMemcpyDeviceToHost));
        for (unsigned int i = 0; i < array_length; ++i) {
            if (hd_write_flag[i] > 1)
                fprintf(stderr, "Array messagelist contains %u messages at index %u!\n", hd_write_flag[i], i);
        }
        ::free(hd_write_flag);  // Fix: previously leaked before the throw
        THROW exception::ArrayMessageWriteConflict("Multiple threads output array messages to the same index, see stderr.\n");
    }
#endif
}
} // namespace flamegpu
|
f427b49bc65f0fd3f6023832e4ed4a3ef02d0d87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// SAXPY kernel: y[i] = a * x[i] + y[i], one thread per element.
// Threads whose index is beyond n simply do nothing.
__global__ void saxpy(int n, float a, float *x, float *y){
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n) return;
    y[idx] = a * x[idx] + y[idx];
}
// Host driver: fills x with 1.0 and y with 2.0, runs y = 2*x + y on the GPU,
// and reports the max deviation from the expected 4.0.
int main(void){
  int N = 1<<20;
  float *x, *y, *d_x, *d_y;
  x = (float*)malloc(N*sizeof(float));
  y = (float*)malloc(N*sizeof(float));
  // NOTE(review): hipMalloc/hipMemcpy return codes are ignored throughout;
  // a failed allocation would surface only as a wrong result.
  hipMalloc(&d_x, N*sizeof(float));
  hipMalloc(&d_y, N*sizeof(float));
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
  // Perform SAXPY on 1M elements
  // 256 threads per block; grid is the ceiling division so all N are covered.
  hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
  // Blocking copy: also serves as the synchronization point after the launch.
  hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    // NOTE(review): `abs` may bind to the int overload without <cmath>;
    // fabsf would be unambiguous -- confirm which header supplies it here.
    maxError = max(maxError, abs(y[i]-4.0f));
  printf("Max error: %f \n", maxError);
  // NOTE(review): d_x/d_y (hipFree) and x/y (free) are never released --
  // harmless at process exit, but a leak if this pattern is reused.
} | f427b49bc65f0fd3f6023832e4ed4a3ef02d0d87.cu | #include <stdio.h>
// SAXPY kernel: y[i] = a * x[i] + y[i], one element per thread.
// The grid must cover n elements; excess threads are masked by the guard.
__global__ void saxpy(int n, float a, float *x, float *y){
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) y[i] = a*x[i] + y[i];
}
// Host driver: y = 2*x + y over 1M floats on the GPU, then checks against 4.0.
int main(void){
  int N = 1<<20;
  float *x, *y, *d_x, *d_y;
  x = (float*)malloc(N*sizeof(float));
  y = (float*)malloc(N*sizeof(float));
  // NOTE(review): all CUDA API return codes are ignored in this example.
  cudaMalloc(&d_x, N*sizeof(float));
  cudaMalloc(&d_y, N*sizeof(float));
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
  // Perform SAXPY on 1M elements
  saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
  // Blocking D2H copy doubles as the post-launch synchronization point.
  cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    // NOTE(review): `abs` may resolve to the int overload without <cmath>;
    // fabsf is safer -- confirm.
    maxError = max(maxError, abs(y[i]-4.0f));
  printf("Max error: %f \n", maxError);
  // NOTE(review): device and host buffers are never freed (cudaFree / free).
} |
2a157e8366072686541c24ca547ebbeab7d4b96c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <time.h>
#include <algorithm>
typedef unsigned char u8;
constexpr unsigned CONFLICT = 0xFFFFFFFF;
constexpr unsigned SOLVED = 0xFFFFFFFE;
constexpr unsigned INVALID_POS = 0xFFFFFFFD;
// Removes the candidates in ~mask from every peer (same row / column / square)
// of cell `pivot` on the bitboard. Block-cooperative: thread i handles exactly
// one peer, threads beyond the peer count return immediately. A peer that
// drops to one candidate is pushed onto `pivots` for further propagation; a
// peer that drops to zero pushes the CONFLICT sentinel instead.
template <typename T, unsigned rank, unsigned pivot_buffer_max>
__device__
void Propagate(T (&board)[rank * rank * rank * rank],
    unsigned (&pivots)[pivot_buffer_max], unsigned* pivot_count, unsigned pivot, T mask) {
    constexpr unsigned width = rank * rank;
    // Calculate neighbor positions
    unsigned row = pivot / width;
    unsigned col = pivot % width;
    unsigned square_row = row / rank;
    unsigned square_col = col / rank;
    unsigned index = threadIdx.x % (width - 1);
    if (threadIdx.x < width - 1) {
        // first (width - 1) threads take care of cells in the same row
        col = index + (index >= col);  // +1 past the pivot's own column
    } else if (threadIdx.x < 2 * width - 2) {
        // next (width - 1) threads take care of cells in the same column
        row = index + (index >= row);
    } else if (threadIdx.x < 2 * width - 2 + (rank - 1) * (rank - 1)) {
        // next (rank - 1)^2 threads take care of cells in the same square
        // (the square cells not already covered by the row/column passes)
        unsigned new_row = index / (rank - 1) + square_row * rank;
        unsigned new_col = index % (rank - 1) + square_col * rank;
        row = new_row + (new_row >= row);
        col = new_col + (new_col >= col);
    } else {
        return;
    }
    // Remove candidates from neighbor and counts the rest candidate
    unsigned neighbor = row * width + col;
    unsigned old = __popcll(board[neighbor]);
    unsigned left = __popcll(board[neighbor] &= mask);
    // Only enqueue on a transition (left < old) into the decided/conflict range.
    if (left < 2 && left < old) {
        unsigned i = atomicAdd(pivot_count, 1);
        if (i >= pivot_buffer_max) {
            printf("Pivot buffer overflow");
            return;
        }
        // left = 0 -> conflict
        // left = 1 -> new pivot
        // Branchless select between the two cases above.
        pivots[i] = CONFLICT * (1 - left) + neighbor * left;
    }
}
// Drains the pivot stack: pops each decided cell and propagates its unique
// candidate to its peers until no pivots remain. Returns false on conflict.
// Block-cooperative: all threads must call this together; every thread reads
// the same shared pivot slot, so all branches below are block-uniform and the
// __syncthreads() calls are safe.
template <typename T, unsigned rank, unsigned pivot_buffer_max>
__device__
bool Reduce(T (&board)[rank * rank * rank * rank],
    unsigned (&pivots)[pivot_buffer_max], unsigned* pivot_count) {
    while(*pivot_count != 0) {
        // Fetch the next pivot
        __syncthreads();
        unsigned pivot = pivots[*pivot_count - 1];
        __syncthreads();
        if (threadIdx.x == 0) {
            --*pivot_count;
        }
        __syncthreads();
        // If previously encountered a conflict, abort
        if (pivot == CONFLICT) {
            if (threadIdx.x == 0) {
                *pivot_count = 0;  // discard remaining pivots of this branch
            }
            return false;
        }
        __syncthreads();
        // ~board[pivot] masks away everything except the pivot's single value.
        Propagate<T, rank, pivot_buffer_max>(board, pivots, pivot_count, pivot, ~board[pivot]);
        __syncthreads();
    }
    return true;
}
// Merge-selects the result_length smallest (value, pos) pairs of two ascending
// sorted lists a and b into a, discarding the overflow; b is left untouched.
// Plain sequential code -- runs per-thread on device or on the host.
template <unsigned result_length>
__device__ __host__
void MinMerge(u8 (&value_a)[result_length], unsigned (&pos_a)[result_length],
    u8 (&value_b)[result_length], unsigned (&pos_b)[result_length]) {
    // Insertion sort to avoid extra space
    unsigned a_index = 0;
    for (unsigned b_index = 0; b_index < result_length; ++b_index) {
        // Advance a_index to the insertion point of value_b[b_index].
        while(true) {
            if (a_index == result_length) {
                return;  // the rest of b is larger than everything kept in a
            }
            if (value_b[b_index] > value_a[a_index]) {
                ++a_index;
            } else {
                break;
            }
        }
        // Shift a right by one (dropping its last element) to make room.
        for (unsigned a_i2 = result_length - 1; a_i2 > a_index; --a_i2) {
            pos_a[a_i2] = pos_a[a_i2 - 1];
            value_a[a_i2] = value_a[a_i2 - 1];
        }
        pos_a[a_index] = pos_b[b_index];
        value_a[a_index] = value_b[b_index];
        ++a_index;
    }
}
// Finds result_length minimal candidates on the board,
// ignoring decided cells
// Block-cooperative: every thread of the block must enter; after the tree
// reduction the global minima sit in values[0][*] / pos[0][*].
template <typename T, unsigned rank, unsigned result_length>
__device__
void FindMins(T (&board)[rank * rank * rank * rank],
    u8 (&values)[rank * rank * rank * rank][result_length],
    unsigned (&pos)[rank * rank * rank * rank][result_length]) {
    constexpr unsigned count = rank * rank * rank * rank;
    for (unsigned i = threadIdx.x; i < count; i += blockDim.x) {
        // Renamed from `count`, which shadowed the constexpr cell total above.
        u8 candidates = (u8)__popcll(board[i]);
        if (candidates == 1) {
            // Decided cell: exclude it with the sentinel key/position.
            values[i][0] = 0xFF;
            pos[i][0] = INVALID_POS;
        } else {
            values[i][0] = candidates;
            pos[i][0] = i;
        }
        for (unsigned j = 1; j < result_length; ++j) {
            values[i][j] = 0xFF;
            pos[i][j] = INVALID_POS;
        }
    }
    __syncthreads();
    // Tree reduction; the first step is the largest power of two below count.
    unsigned step = 1 << (31 - __clz(count - 1));
    for (; step > 0; step >>= 1) {
        unsigned limit = min(step, count - step);
        for (unsigned i = threadIdx.x; i < limit; i += blockDim.x) {
            MinMerge<result_length>(values[i], pos[i], values[i + step], pos[i + step]);
        }
        __syncthreads();
    }
}
// One-block kernel wrapper around FindMins: after the block-wide reduction the
// result_length smallest candidate counts live in row 0 of the shared arrays;
// copy their cell positions out to result_pos.
// NOTE(review): static shared usage scales with width^2 * result_length *
// (sizeof(u8) + sizeof(unsigned)) -- fits 48KB for rank <= 6, reconfirm if
// result_length grows.
template <typename T, unsigned rank, unsigned result_length>
__global__
void FindMinsKernel(T (&board)[rank * rank * rank * rank], unsigned (&result_pos)[result_length]) {
    __shared__ u8 values[rank * rank * rank * rank][result_length];
    __shared__ unsigned pos[rank * rank * rank * rank][result_length];
    FindMins<T, rank, result_length>(board, values, pos);
    __syncthreads();
    for (unsigned i = threadIdx.x; i < result_length; i += blockDim.x) {
        result_pos[i] = pos[0][i];
    }
}
// One-block DFS over guesses rooted at `first_candidate`: explores up to
// max_depth - 1 levels of branching, writing every surviving depth-limit board
// to output_boards_start and the number of boards written to *total. If a
// fully decided board is found along the way, it is written to slot 0 and
// *total is set to the SOLVED sentinel instead.
// NOTE(review): no bound check against the caller's output buffer size --
// relies on SolveImpl's 512 MiB guess buffers being large enough; confirm.
template <typename T, unsigned rank, unsigned max_depth, unsigned pivot_buffer_max>
__global__
void SearchTree(T (&board)[rank * rank * rank * rank],
    T output_boards_start[][rank * rank * rank * rank], unsigned* total, unsigned first_candidate) {
    constexpr unsigned width = rank * rank;
    auto output_boards = output_boards_start;
    // DFS stack. Each stack frame consists of
    // board_stack[*]: snapshot of the board in reduced state
    // candidate_stack[*]: position of the cell being branched on
    // bit_stack[*]: bit flags of the branching cell.
    // Candidates that have been tried or being tried is removed from the flags
    __shared__ T board_stack[max_depth][width * width];
    __shared__ unsigned candidate_stack[max_depth - 1];
    __shared__ T bit_stack[max_depth - 1];
    // Per-thread copy, but every thread mutates it identically (block-uniform).
    unsigned stack_size = 1; // [1, max_depth)
    // scratch spaces for Reducer
    __shared__ unsigned pivots[pivot_buffer_max];
    __shared__ unsigned pivot_count;
    // scratch spaces for popcnt min
    __shared__ u8 min_value_board[width * width][1];
    __shared__ unsigned min_pos_board[width * width][1];
    // Bring in the initial board
    for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
        board_stack[0][i] = board[i];
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        pivot_count = 0;
        *total = 0;
        candidate_stack[0] = first_candidate;
        bit_stack[0] = board_stack[0][first_candidate];
    }
    while(true) {
        // At this point, the stack top [stack_size - 1] contains
        // a reduced board, the cell to branch on, and the candidates
        // that hasn't been tried.
        __syncthreads();
        if (bit_stack[stack_size - 1] == 0) { // no more candidates
            if (stack_size == 1) { // stack is going to be empty
                break;
            }
            stack_size--;
            continue;
        }
        // Copy the board to a new stack frame
        for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
            board_stack[stack_size][i] = board_stack[stack_size - 1][i];
        }
        // Choose the branch
        // mask clears the lowest set candidate bit of the branching cell.
        T mask = ~((T)1 << (__ffsll(bit_stack[stack_size - 1]) - 1));
        __syncthreads();
        if (threadIdx.x == 0) {
            bit_stack[stack_size - 1] &= mask; // remove from candidate set
            board_stack[stack_size][candidate_stack[stack_size - 1]] = ~mask; // Fix the candidate cell
        }
        __syncthreads();
        // Propagate from the chosen branch
        Propagate<T, rank, pivot_buffer_max>(board_stack[stack_size], pivots, &pivot_count, candidate_stack[stack_size - 1], mask);
        __syncthreads();
        // Reduce the rest
        if (!Reduce<T, rank, pivot_buffer_max>(board_stack[stack_size], pivots, &pivot_count)) {
            // if the reduce found a conflict, we discard this branch and the new stack frame
            continue;
        }
        __syncthreads();
        if (stack_size == max_depth - 1) {
            // Reached the stack top, output
            for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
                (*output_boards)[i] = board_stack[stack_size][i];
            }
            output_boards += 1;
        } else {
            // Prepare for deeper stack.
            FindMins<T, rank, 1>(board_stack[stack_size], min_value_board, min_pos_board);
            __syncthreads();
            if (min_pos_board[0][0] == INVALID_POS) {
                // We actually found a solution. Dump to output and abort
                for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
                    (*output_boards_start)[i] = board_stack[stack_size][i];
                }
                if (threadIdx.x == 0) {
                    *total = SOLVED;
                }
                return;
            } else {
                // make the new stack frame a real frame
                if (threadIdx.x == 0) {
                    candidate_stack[stack_size] = min_pos_board[0][0];
                    bit_stack[stack_size] = board_stack[stack_size][candidate_stack[stack_size]];
                }
                stack_size++;
            }
        }
    }
    if (threadIdx.x == 0) {
        *total = output_boards - output_boards_start;
    }
}
// Builds the initial candidate bitboard from the clue grid (0 = blank):
// seeds every cell with all `width` candidate bits, then fixes and propagates
// each clue in turn. On contradiction sets *init_success = false and bails
// out. Single-block kernel; all threads must participate.
template <typename T, unsigned rank, unsigned pivot_buffer_max>
__global__
void Initialize(T (&output_board)[rank * rank * rank * rank],
    int (&clue)[rank * rank * rank * rank], bool *init_success) {
    constexpr unsigned width = rank * rank;
    __shared__ T board[width * width];
    __shared__ unsigned pivots[pivot_buffer_max];
    __shared__ unsigned pivot_count;
    if (threadIdx.x == 0) {
        pivot_count = 0;
        *init_success = true;
    }
    for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
        board[i] = (T)(((T)1 << width) - 1);  // low `width` candidate bits set
    }
    __syncthreads();
    // Clues are applied sequentially so each propagation fully settles first.
    for (unsigned i = 0; i < width * width; i += 1) {
        if (clue[i] == 0) continue;
        T flags = (T)1 << (clue[i] - 1);  // clue values are 1-based
        if (threadIdx.x == 0) {
            board[i] = flags;
        }
        __syncthreads();
        Propagate<T, rank, pivot_buffer_max>(board, pivots, &pivot_count, i, ~flags);
        __syncthreads();
        if (!Reduce<T, rank, pivot_buffer_max>(board, pivots, &pivot_count)) {
            if (threadIdx.x == 0) {
                *init_success = false;
            }
            return;
        }
        __syncthreads();
    }
    for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
        output_board[i] = board[i];
    }
}
// Writes the solved bitboard back into the clue array: a decided cell
// (exactly one candidate bit set) gets its 1-based value via the bit index;
// an undecided cell is written as 0.
template <typename T, unsigned rank>
__global__
void Finalize(T (&board)[rank * rank * rank * rank],
    int (&clue)[rank * rank * rank * rank]) {
    constexpr unsigned width = rank * rank;
    for (unsigned cell = threadIdx.x; cell < width * width; cell += blockDim.x) {
        const bool decided = (__popcll(board[cell]) == 1);
        clue[cell] = decided ? __ffsll(board[cell]) : 0;
    }
}
#define checkError(code) { checkErrorImpl((code), __FILE__, __LINE__); }
// Aborts the process with a diagnostic when a HIP API call failed.
// file/line identify the call site (captured by the checkError macro above).
void checkErrorImpl(hipError_t code, const char* file, int line) {
    if (code != hipSuccess) {
        // Report on stderr (was stdout) so diagnostics survive piped output.
        std::fprintf(stderr, "[%s: %d]CUDA Error: %s\n", file, line, hipGetErrorString(code));
        exit(-1);
    }
}
// Host driver: keeps a device-resident DFS stack of boards. Each iteration
// pops the top board, scans up to max_guess branching cells, explores each
// guess (max_depth - 1) levels deep in its own stream via SearchTree, and
// pushes back the expansion with the fewest resulting boards. Terminates when
// a board is fully solved or the stack empties (no solution). The solved
// board is decoded back into `clue` in place.
template <typename T, unsigned rank, unsigned pivot_buffer_max, unsigned max_depth, unsigned max_guess>
void SolveImpl(int *clue) {
    constexpr unsigned width = rank * rank;
    // Transfer clue table to gpu
    int (*clue_gpu)[width * width];
    checkError(hipMalloc((void**)&clue_gpu, sizeof(int) * width * width));
    checkError(hipMemcpy(clue_gpu, clue, sizeof(int) * width * width, hipMemcpyHostToDevice));
    // Stack of boards
    // NOTE(review): fixed 1 GiB allocation with no overflow check when results
    // are pushed below -- confirm sizing for pathological boards.
    T (*master_stack)[width * width];
    checkError(hipMalloc((void**)&master_stack, 1024 * 1024 * 1024));
    // arrays of buffers. Each array for a guess point
    T (*(guess_buffer[max_guess]))[width * width];
    for (unsigned i = 0; i < max_guess; ++i) {
        checkError(hipMalloc((void**)&guess_buffer[i], 512 * 1024 * 1024));
    }
    // array of counters for receiving the solution count
    unsigned (*total_counter_buffer)[max_guess];
    checkError(hipMalloc((void**)&total_counter_buffer, max_guess * sizeof(unsigned)));
    // Buffer for scanning guess points
    unsigned (*min_pos_board)[max_guess];
    checkError(hipMalloc((void**)&min_pos_board, max_guess * sizeof(unsigned)));
    unsigned min_pos_board_cpu[max_guess];
    // Streams for running each guesses
    hipStream_t stream[max_guess];
    for (unsigned i = 0; i < max_guess; ++i) {
        checkError(hipStreamCreate(&stream[i]));
    }
    // For receiving the init result
    bool init_success, *init_success_gpu;
    checkError(hipMalloc((void**)&init_success_gpu, sizeof(bool)));
    // Initialize at stack bottom
    // NOTE(review): kernel launches are never checked (no hipGetLastError);
    // a launch failure would only surface through the following hipMemcpy.
    hipLaunchKernelGGL(( Initialize<T, rank, pivot_buffer_max>), dim3(1), dim3(::min(width * width, 1024u)), 0, 0,
        master_stack[0], *clue_gpu, init_success_gpu);
    checkError(hipMemcpy(&init_success, init_success_gpu, sizeof(bool), hipMemcpyDeviceToHost));
    if (!init_success) {
        std::printf("Found conflict in init!\n");
    }
    // Boards available in the stack. We have an initial one already
    unsigned master_stack_size = 1;
    if (init_success) while(true) {
        //printf("%u\n", master_stack_size);
        // scan guess points for the stack top
        hipLaunchKernelGGL(( FindMinsKernel<T, rank, max_guess>), dim3(1), dim3(::min(width * width, 1024u)), 0, 0,
            master_stack[master_stack_size - 1], *min_pos_board);
        checkError(hipMemcpy(min_pos_board_cpu, min_pos_board,
            max_guess * sizeof(unsigned), hipMemcpyDeviceToHost));
        if (min_pos_board_cpu[0] == INVALID_POS) {
            // No guess point. The board is actually solved
            break;
        }
        // dispatch solver for each guess point
        unsigned guess = 0;
        for (; guess < max_guess && min_pos_board_cpu[guess] != INVALID_POS; ++guess) {
            hipLaunchKernelGGL(( SearchTree<T, rank, max_depth, pivot_buffer_max>)
                , dim3(1), dim3(::min(width * width, 256u)), 0, stream[guess],
                master_stack[master_stack_size - 1], guess_buffer[guess],
                &(*total_counter_buffer)[guess], min_pos_board_cpu[guess]);
        }
        checkError(hipDeviceSynchronize());
        // find the guess that has fewest result and pump to the stack
        unsigned total_counter_buffer_cpu[max_guess];
        checkError(hipMemcpy(total_counter_buffer_cpu,
            total_counter_buffer, guess * sizeof(unsigned),
            hipMemcpyDeviceToHost));
        // check if there is already a solved board
        auto solved = std::find(
            total_counter_buffer_cpu,
            total_counter_buffer_cpu + guess, SOLVED)
            - total_counter_buffer_cpu;
        // NOTE(review): `solved` is a ptrdiff_t compared against unsigned
        // `guess` -- benign for small max_guess, but a warning magnet.
        if (solved != guess) {
            checkError(hipMemcpy(
                &master_stack[master_stack_size - 1],
                guess_buffer[solved],
                width * width * sizeof(T), hipMemcpyDeviceToDevice));
            break;
        }
        unsigned min_guess = std::min_element(
            total_counter_buffer_cpu,
            total_counter_buffer_cpu + guess)
            - total_counter_buffer_cpu;
        // Pop the top board and push the chosen expansion in its place.
        checkError(hipMemcpy(
            &master_stack[master_stack_size - 1],
            guess_buffer[min_guess],
            width * width * total_counter_buffer_cpu[min_guess] * sizeof(T),
            hipMemcpyDeviceToDevice));
        master_stack_size += total_counter_buffer_cpu[min_guess];
        master_stack_size -= 1;
        if (master_stack_size == 0) {
            // Best expansion produced zero boards and the stack drained.
            std::printf("No solution!\n");
            master_stack_size = 1;
            break;
        }
    }
    hipLaunchKernelGGL(( Finalize<T, rank>), dim3(1), dim3(::min(width * width, 1024u)), 0, 0,
        master_stack[master_stack_size - 1], *clue_gpu);
    checkError(hipMemcpy(
        clue, clue_gpu,
        width * width * sizeof(int), hipMemcpyDeviceToHost
    ));
    checkError(hipFree(init_success_gpu));
    checkError(hipFree(min_pos_board));
    checkError(hipFree(total_counter_buffer));
    checkError(hipFree(master_stack));
    checkError(hipFree(clue_gpu));
    for (unsigned i = 0; i < max_guess; ++i) {
        checkError(hipFree(guess_buffer[i]));
        checkError(hipStreamDestroy(stream[i]));
    }
}
// Dispatches to the SolveImpl instantiation matching the board rank, picking
// the narrowest mask type that holds rank*rank candidate bits.
// clue: rank^2 x rank^2 grid, 0 = empty; solved in place.
void Solve(unsigned rank, int* clue) {
    switch(rank) {
    case 2:
        SolveImpl<unsigned char, 2, 100, 4, 4>(clue);
        break;
    case 3:
        SolveImpl<unsigned short, 3, 100, 4, 4>(clue);
        break;
    case 4:
        SolveImpl<unsigned short, 4, 100, 4, 4>(clue);
        break;
    case 5:
        SolveImpl<unsigned int, 5, 100, 4, 4>(clue);
        break;
    case 6:
        SolveImpl<unsigned long long, 6, 100, 4, 4>(clue);
        break;
    default:
        // Previously an unsupported rank fell through silently and the caller
        // printed the unsolved board with no hint anything went wrong.
        std::fprintf(stderr, "Unsupported rank %u (supported: 2-6)\n", rank);
        break;
    }
}
// Reads a rank^2 x rank^2 clue grid from stdin (0 = empty cell), solves it on
// the GPU, and pretty-prints the completed grid with rank-sized band/stack
// separators. Optional argv[1] selects the rank (default 3, classic 9x9).
int main(int argc, char** argv) {
    unsigned rank = 3;
    if (argc >= 2) {
        // std::atoi returns 0 on malformed input and may be negative; validate
        // before the unsigned assignment instead of wrapping to a huge value.
        int parsed = std::atoi(argv[1]);
        if (parsed < 2 || parsed > 6) {
            std::printf("Unsupported rank %d (expected 2..6)\n", parsed);
            return 1;
        }
        rank = static_cast<unsigned>(parsed);
    }
    unsigned width = rank * rank;
    int* clue = new int[width * width];
    for (unsigned i = 0; i < width * width; ++i) {
        int input;
        if (1 != std::scanf("%d", &input)) {
            input = 0;  // short/missing input reads as an empty cell
        }
        clue[i] = input;
    }
    Solve(rank, clue);
    // Unsigned loop variables: avoids the signed/unsigned comparison warnings
    // the original int counters produced against `width`.
    for (unsigned row = 0; row < width; ++row) {
        if (row % rank == 0) {
            std::printf("\n");
        }
        for (unsigned col = 0; col < width; ++col) {
            if (col % rank == 0) {
                std::printf(" ");
            }
            std::printf("%2d ", clue[row * width + col]);
        }
        std::printf("\n");
    }
    delete[] clue;  // was leaked in the original
    return 0;
}
| 2a157e8366072686541c24ca547ebbeab7d4b96c.cu |
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <time.h>
#include <algorithm>
typedef unsigned char u8;
constexpr unsigned CONFLICT = 0xFFFFFFFF;
constexpr unsigned SOLVED = 0xFFFFFFFE;
constexpr unsigned INVALID_POS = 0xFFFFFFFD;
// Removes the candidates in ~mask from every peer (row / column / square) of
// cell `pivot`. Block-cooperative: thread i handles one peer; threads beyond
// the peer count return. Peers dropping to one candidate are pushed onto
// `pivots`; peers dropping to zero push the CONFLICT sentinel.
template <typename T, unsigned rank, unsigned pivot_buffer_max>
__device__
void Propagate(T (&board)[rank * rank * rank * rank],
    unsigned (&pivots)[pivot_buffer_max], unsigned* pivot_count, unsigned pivot, T mask) {
    constexpr unsigned width = rank * rank;
    // Calculate neighbor positions
    unsigned row = pivot / width;
    unsigned col = pivot % width;
    unsigned square_row = row / rank;
    unsigned square_col = col / rank;
    unsigned index = threadIdx.x % (width - 1);
    if (threadIdx.x < width - 1) {
        // first (width - 1) threads take care of cells in the same row
        col = index + (index >= col);  // +1 past the pivot's own column
    } else if (threadIdx.x < 2 * width - 2) {
        // next (width - 1) threads take care of cells in the same column
        row = index + (index >= row);
    } else if (threadIdx.x < 2 * width - 2 + (rank - 1) * (rank - 1)) {
        // next (rank - 1)^2 threads take care of cells in the same square
        // (those not already covered by the row/column passes)
        unsigned new_row = index / (rank - 1) + square_row * rank;
        unsigned new_col = index % (rank - 1) + square_col * rank;
        row = new_row + (new_row >= row);
        col = new_col + (new_col >= col);
    } else {
        return;
    }
    // Remove candidates from neighbor and counts the rest candidate
    unsigned neighbor = row * width + col;
    unsigned old = __popcll(board[neighbor]);
    unsigned left = __popcll(board[neighbor] &= mask);
    if (left < 2 && left < old) {
        unsigned i = atomicAdd(pivot_count, 1);
        if (i >= pivot_buffer_max) {
            printf("Pivot buffer overflow");
            return;
        }
        // left = 0 -> conflict
        // left = 1 -> new pivot
        // Branchless select between the two cases above.
        pivots[i] = CONFLICT * (1 - left) + neighbor * left;
    }
}
// Drains the pivot stack, propagating each decided cell to its peers until no
// pivots remain; returns false on conflict. Block-cooperative: every thread
// reads the same shared pivot slot, so all branches are block-uniform and the
// barriers are safe.
template <typename T, unsigned rank, unsigned pivot_buffer_max>
__device__
bool Reduce(T (&board)[rank * rank * rank * rank],
    unsigned (&pivots)[pivot_buffer_max], unsigned* pivot_count) {
    while(*pivot_count != 0) {
        // Fetch the next pivot
        __syncthreads();
        unsigned pivot = pivots[*pivot_count - 1];
        __syncthreads();
        if (threadIdx.x == 0) {
            --*pivot_count;
        }
        __syncthreads();
        // If previously encountered a conflict, abort
        if (pivot == CONFLICT) {
            if (threadIdx.x == 0) {
                *pivot_count = 0;  // discard remaining pivots of this branch
            }
            return false;
        }
        __syncthreads();
        // ~board[pivot] masks away everything except the pivot's single value.
        Propagate<T, rank, pivot_buffer_max>(board, pivots, pivot_count, pivot, ~board[pivot]);
        __syncthreads();
    }
    return true;
}
// Merge-selects the result_length smallest (value, pos) pairs of two ascending
// sorted lists a and b into a, discarding the overflow; b is not modified.
// Sequential code -- runs per-thread on device or host.
template <unsigned result_length>
__device__ __host__
void MinMerge(u8 (&value_a)[result_length], unsigned (&pos_a)[result_length],
    u8 (&value_b)[result_length], unsigned (&pos_b)[result_length]) {
    // Insertion sort to avoid extra space
    unsigned a_index = 0;
    for (unsigned b_index = 0; b_index < result_length; ++b_index) {
        // Advance a_index to the insertion point of value_b[b_index].
        while(true) {
            if (a_index == result_length) {
                return;  // rest of b is larger than everything kept in a
            }
            if (value_b[b_index] > value_a[a_index]) {
                ++a_index;
            } else {
                break;
            }
        }
        // Shift a right by one (dropping the last element) to make room.
        for (unsigned a_i2 = result_length - 1; a_i2 > a_index; --a_i2) {
            pos_a[a_i2] = pos_a[a_i2 - 1];
            value_a[a_i2] = value_a[a_i2 - 1];
        }
        pos_a[a_index] = pos_b[b_index];
        value_a[a_index] = value_b[b_index];
        ++a_index;
    }
}
// Finds result_length minimal candidates on the board,
// ignoring decided cells
// Block-cooperative: every thread of the block must enter; after the tree
// reduction the global minima sit in values[0][*] / pos[0][*].
template <typename T, unsigned rank, unsigned result_length>
__device__
void FindMins(T (&board)[rank * rank * rank * rank],
    u8 (&values)[rank * rank * rank * rank][result_length],
    unsigned (&pos)[rank * rank * rank * rank][result_length]) {
    constexpr unsigned count = rank * rank * rank * rank;
    for (unsigned i = threadIdx.x; i < count; i += blockDim.x) {
        // Renamed from `count`, which shadowed the constexpr cell total above.
        u8 candidates = (u8)__popcll(board[i]);
        if (candidates == 1) {
            // Decided cell: exclude it with the sentinel key/position.
            values[i][0] = 0xFF;
            pos[i][0] = INVALID_POS;
        } else {
            values[i][0] = candidates;
            pos[i][0] = i;
        }
        for (unsigned j = 1; j < result_length; ++j) {
            values[i][j] = 0xFF;
            pos[i][j] = INVALID_POS;
        }
    }
    __syncthreads();
    // Tree reduction; the first step is the largest power of two below count.
    unsigned step = 1 << (31 - __clz(count - 1));
    for (; step > 0; step >>= 1) {
        unsigned limit = min(step, count - step);
        for (unsigned i = threadIdx.x; i < limit; i += blockDim.x) {
            MinMerge<result_length>(values[i], pos[i], values[i + step], pos[i + step]);
        }
        __syncthreads();
    }
}
// One-block kernel wrapper around FindMins: after the reduction the
// result_length smallest candidate counts live in row 0 of the shared arrays;
// copy their cell positions out to result_pos.
template <typename T, unsigned rank, unsigned result_length>
__global__
void FindMinsKernel(T (&board)[rank * rank * rank * rank], unsigned (&result_pos)[result_length]) {
    __shared__ u8 values[rank * rank * rank * rank][result_length];
    __shared__ unsigned pos[rank * rank * rank * rank][result_length];
    FindMins<T, rank, result_length>(board, values, pos);
    __syncthreads();
    for (unsigned i = threadIdx.x; i < result_length; i += blockDim.x) {
        result_pos[i] = pos[0][i];
    }
}
// One-block DFS over guesses rooted at `first_candidate`: explores up to
// max_depth - 1 levels of branching, writing every surviving depth-limit board
// to output_boards_start and the board count to *total; a fully decided board
// goes to slot 0 with *total = SOLVED.
// NOTE(review): no bound check against the caller's output buffer size --
// relies on SolveImpl's 512 MiB guess buffers; confirm.
template <typename T, unsigned rank, unsigned max_depth, unsigned pivot_buffer_max>
__global__
void SearchTree(T (&board)[rank * rank * rank * rank],
    T output_boards_start[][rank * rank * rank * rank], unsigned* total, unsigned first_candidate) {
    constexpr unsigned width = rank * rank;
    auto output_boards = output_boards_start;
    // DFS stack. Each stack frame consists of
    // board_stack[*]: snapshot of the board in reduced state
    // candidate_stack[*]: position of the cell being branched on
    // bit_stack[*]: bit flags of the branching cell.
    // Candidates that have been tried or being tried is removed from the flags
    __shared__ T board_stack[max_depth][width * width];
    __shared__ unsigned candidate_stack[max_depth - 1];
    __shared__ T bit_stack[max_depth - 1];
    // Per-thread copy, but every thread mutates it identically (block-uniform).
    unsigned stack_size = 1; // [1, max_depth)
    // scratch spaces for Reducer
    __shared__ unsigned pivots[pivot_buffer_max];
    __shared__ unsigned pivot_count;
    // scratch spaces for popcnt min
    __shared__ u8 min_value_board[width * width][1];
    __shared__ unsigned min_pos_board[width * width][1];
    // Bring in the initial board
    for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
        board_stack[0][i] = board[i];
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        pivot_count = 0;
        *total = 0;
        candidate_stack[0] = first_candidate;
        bit_stack[0] = board_stack[0][first_candidate];
    }
    while(true) {
        // At this point, the stack top [stack_size - 1] contains
        // a reduced board, the cell to branch on, and the candidates
        // that hasn't been tried.
        __syncthreads();
        if (bit_stack[stack_size - 1] == 0) { // no more candidates
            if (stack_size == 1) { // stack is going to be empty
                break;
            }
            stack_size--;
            continue;
        }
        // Copy the board to a new stack frame
        for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
            board_stack[stack_size][i] = board_stack[stack_size - 1][i];
        }
        // Choose the branch
        // mask clears the lowest set candidate bit of the branching cell.
        T mask = ~((T)1 << (__ffsll(bit_stack[stack_size - 1]) - 1));
        __syncthreads();
        if (threadIdx.x == 0) {
            bit_stack[stack_size - 1] &= mask; // remove from candidate set
            board_stack[stack_size][candidate_stack[stack_size - 1]] = ~mask; // Fix the candidate cell
        }
        __syncthreads();
        // Propagate from the chosen branch
        Propagate<T, rank, pivot_buffer_max>(board_stack[stack_size], pivots, &pivot_count, candidate_stack[stack_size - 1], mask);
        __syncthreads();
        // Reduce the rest
        if (!Reduce<T, rank, pivot_buffer_max>(board_stack[stack_size], pivots, &pivot_count)) {
            // if the reduce found a conflict, we discard this branch and the new stack frame
            continue;
        }
        __syncthreads();
        if (stack_size == max_depth - 1) {
            // Reached the stack top, output
            for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
                (*output_boards)[i] = board_stack[stack_size][i];
            }
            output_boards += 1;
        } else {
            // Prepare for deeper stack.
            FindMins<T, rank, 1>(board_stack[stack_size], min_value_board, min_pos_board);
            __syncthreads();
            if (min_pos_board[0][0] == INVALID_POS) {
                // We actually found a solution. Dump to output and abort
                for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
                    (*output_boards_start)[i] = board_stack[stack_size][i];
                }
                if (threadIdx.x == 0) {
                    *total = SOLVED;
                }
                return;
            } else {
                // make the new stack frame a real frame
                if (threadIdx.x == 0) {
                    candidate_stack[stack_size] = min_pos_board[0][0];
                    bit_stack[stack_size] = board_stack[stack_size][candidate_stack[stack_size]];
                }
                stack_size++;
            }
        }
    }
    if (threadIdx.x == 0) {
        *total = output_boards - output_boards_start;
    }
}
// Builds the initial candidate bitboard from the clue grid (0 = blank):
// seeds every cell with all `width` candidate bits, then fixes and propagates
// each clue in turn. Sets *init_success = false and bails on contradiction.
// Single-block kernel; all threads must participate.
template <typename T, unsigned rank, unsigned pivot_buffer_max>
__global__
void Initialize(T (&output_board)[rank * rank * rank * rank],
    int (&clue)[rank * rank * rank * rank], bool *init_success) {
    constexpr unsigned width = rank * rank;
    __shared__ T board[width * width];
    __shared__ unsigned pivots[pivot_buffer_max];
    __shared__ unsigned pivot_count;
    if (threadIdx.x == 0) {
        pivot_count = 0;
        *init_success = true;
    }
    for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
        board[i] = (T)(((T)1 << width) - 1);  // low `width` candidate bits set
    }
    __syncthreads();
    // Clues are applied sequentially so each propagation fully settles first.
    for (unsigned i = 0; i < width * width; i += 1) {
        if (clue[i] == 0) continue;
        T flags = (T)1 << (clue[i] - 1);  // clue values are 1-based
        if (threadIdx.x == 0) {
            board[i] = flags;
        }
        __syncthreads();
        Propagate<T, rank, pivot_buffer_max>(board, pivots, &pivot_count, i, ~flags);
        __syncthreads();
        if (!Reduce<T, rank, pivot_buffer_max>(board, pivots, &pivot_count)) {
            if (threadIdx.x == 0) {
                *init_success = false;
            }
            return;
        }
        __syncthreads();
    }
    for (unsigned i = threadIdx.x; i < width * width; i += blockDim.x) {
        output_board[i] = board[i];
    }
}
// Writes the solved bitboard back into the clue array: a decided cell
// (exactly one candidate bit set) gets its 1-based value via the bit index;
// an undecided cell is written as 0.
template <typename T, unsigned rank>
__global__
void Finalize(T (&board)[rank * rank * rank * rank],
    int (&clue)[rank * rank * rank * rank]) {
    constexpr unsigned width = rank * rank;
    for (unsigned cell = threadIdx.x; cell < width * width; cell += blockDim.x) {
        const bool decided = (__popcll(board[cell]) == 1);
        clue[cell] = decided ? __ffsll(board[cell]) : 0;
    }
}
#define checkError(code) { checkErrorImpl((code), __FILE__, __LINE__); }
// Aborts the process with a diagnostic when a CUDA API call failed.
// file/line identify the call site (captured by the checkError macro above).
void checkErrorImpl(cudaError_t code, const char* file, int line) {
    if (code != cudaSuccess) {
        // Report on stderr (was stdout) so diagnostics survive piped output.
        std::fprintf(stderr, "[%s: %d]CUDA Error: %s\n", file, line, cudaGetErrorString(code));
        exit(-1);
    }
}
// Host driver: keeps a device-resident DFS stack of boards. Each iteration
// pops the top board, scans up to max_guess branching cells, explores each
// guess (max_depth - 1) levels deep in its own stream via SearchTree, and
// pushes back the expansion with the fewest resulting boards. Terminates when
// a board is fully solved or the stack empties. Decodes the result into
// `clue` in place.
template <typename T, unsigned rank, unsigned pivot_buffer_max, unsigned max_depth, unsigned max_guess>
void SolveImpl(int *clue) {
    constexpr unsigned width = rank * rank;
    // Transfer clue table to gpu
    int (*clue_gpu)[width * width];
    checkError(cudaMalloc((void**)&clue_gpu, sizeof(int) * width * width));
    checkError(cudaMemcpy(clue_gpu, clue, sizeof(int) * width * width, cudaMemcpyHostToDevice));
    // Stack of boards
    // NOTE(review): fixed 1 GiB allocation with no overflow check when results
    // are pushed below -- confirm sizing for pathological boards.
    T (*master_stack)[width * width];
    checkError(cudaMalloc((void**)&master_stack, 1024 * 1024 * 1024));
    // arrays of buffers. Each array for a guess point
    T (*(guess_buffer[max_guess]))[width * width];
    for (unsigned i = 0; i < max_guess; ++i) {
        checkError(cudaMalloc((void**)&guess_buffer[i], 512 * 1024 * 1024));
    }
    // array of counters for receiving the solution count
    unsigned (*total_counter_buffer)[max_guess];
    checkError(cudaMalloc((void**)&total_counter_buffer, max_guess * sizeof(unsigned)));
    // Buffer for scanning guess points
    unsigned (*min_pos_board)[max_guess];
    checkError(cudaMalloc((void**)&min_pos_board, max_guess * sizeof(unsigned)));
    unsigned min_pos_board_cpu[max_guess];
    // Streams for running each guesses
    cudaStream_t stream[max_guess];
    for (unsigned i = 0; i < max_guess; ++i) {
        checkError(cudaStreamCreate(&stream[i]));
    }
    // For receiving the init result
    bool init_success, *init_success_gpu;
    checkError(cudaMalloc((void**)&init_success_gpu, sizeof(bool)));
    // Initialize at stack bottom
    // NOTE(review): kernel launches are never checked (no cudaGetLastError);
    // a launch failure would only surface through the following cudaMemcpy.
    Initialize<T, rank, pivot_buffer_max><<<1, std::min(width * width, 1024u)>>>
        (master_stack[0], *clue_gpu, init_success_gpu);
    checkError(cudaMemcpy(&init_success, init_success_gpu, sizeof(bool), cudaMemcpyDeviceToHost));
    if (!init_success) {
        std::printf("Found conflict in init!\n");
    }
    // Boards available in the stack. We have an initial one already
    unsigned master_stack_size = 1;
    if (init_success) while(true) {
        //printf("%u\n", master_stack_size);
        // scan guess points for the stack top
        FindMinsKernel<T, rank, max_guess><<<1, std::min(width * width, 1024u)>>>
            (master_stack[master_stack_size - 1], *min_pos_board);
        checkError(cudaMemcpy(min_pos_board_cpu, min_pos_board,
            max_guess * sizeof(unsigned), cudaMemcpyDeviceToHost));
        if (min_pos_board_cpu[0] == INVALID_POS) {
            // No guess point. The board is actually solved
            break;
        }
        // dispatch solver for each guess point
        unsigned guess = 0;
        for (; guess < max_guess && min_pos_board_cpu[guess] != INVALID_POS; ++guess) {
            SearchTree<T, rank, max_depth, pivot_buffer_max>
                <<<1, std::min(width * width, 256u), 0, stream[guess]>>>
                (master_stack[master_stack_size - 1], guess_buffer[guess],
                &(*total_counter_buffer)[guess], min_pos_board_cpu[guess]);
        }
        checkError(cudaDeviceSynchronize());
        // find the guess that has fewest result and pump to the stack
        unsigned total_counter_buffer_cpu[max_guess];
        checkError(cudaMemcpy(total_counter_buffer_cpu,
            total_counter_buffer, guess * sizeof(unsigned),
            cudaMemcpyDeviceToHost));
        // check if there is already a solved board
        auto solved = std::find(
            total_counter_buffer_cpu,
            total_counter_buffer_cpu + guess, SOLVED)
            - total_counter_buffer_cpu;
        // NOTE(review): `solved` is a ptrdiff_t compared against unsigned
        // `guess` -- benign for small max_guess, but a warning magnet.
        if (solved != guess) {
            checkError(cudaMemcpy(
                &master_stack[master_stack_size - 1],
                guess_buffer[solved],
                width * width * sizeof(T), cudaMemcpyDeviceToDevice));
            break;
        }
        unsigned min_guess = std::min_element(
            total_counter_buffer_cpu,
            total_counter_buffer_cpu + guess)
            - total_counter_buffer_cpu;
        // Pop the top board and push the chosen expansion in its place.
        checkError(cudaMemcpy(
            &master_stack[master_stack_size - 1],
            guess_buffer[min_guess],
            width * width * total_counter_buffer_cpu[min_guess] * sizeof(T),
            cudaMemcpyDeviceToDevice));
        master_stack_size += total_counter_buffer_cpu[min_guess];
        master_stack_size -= 1;
        if (master_stack_size == 0) {
            // Best expansion produced zero boards and the stack drained.
            std::printf("No solution!\n");
            master_stack_size = 1;
            break;
        }
    }
    Finalize<T, rank><<<1, std::min(width * width, 1024u)>>>
        (master_stack[master_stack_size - 1], *clue_gpu);
    checkError(cudaMemcpy(
        clue, clue_gpu,
        width * width * sizeof(int), cudaMemcpyDeviceToHost
    ));
    checkError(cudaFree(init_success_gpu));
    checkError(cudaFree(min_pos_board));
    checkError(cudaFree(total_counter_buffer));
    checkError(cudaFree(master_stack));
    checkError(cudaFree(clue_gpu));
    for (unsigned i = 0; i < max_guess; ++i) {
        checkError(cudaFree(guess_buffer[i]));
        checkError(cudaStreamDestroy(stream[i]));
    }
}
// Dispatches to the SolveImpl instantiation matching the board rank, picking
// the narrowest mask type that holds rank*rank candidate bits.
// clue: rank^2 x rank^2 grid, 0 = empty; solved in place.
void Solve(unsigned rank, int* clue) {
    switch(rank) {
    case 2:
        SolveImpl<unsigned char, 2, 100, 4, 4>(clue);
        break;
    case 3:
        SolveImpl<unsigned short, 3, 100, 4, 4>(clue);
        break;
    case 4:
        SolveImpl<unsigned short, 4, 100, 4, 4>(clue);
        break;
    case 5:
        SolveImpl<unsigned int, 5, 100, 4, 4>(clue);
        break;
    case 6:
        SolveImpl<unsigned long long, 6, 100, 4, 4>(clue);
        break;
    default:
        // Previously an unsupported rank fell through silently and the caller
        // printed the unsolved board with no hint anything went wrong.
        std::fprintf(stderr, "Unsupported rank %u (supported: 2-6)\n", rank);
        break;
    }
}
// Reads a rank^2 x rank^2 clue grid from stdin (0 = empty cell), solves it on
// the GPU, and pretty-prints the completed grid with rank-sized band/stack
// separators. Optional argv[1] selects the rank (default 3, classic 9x9).
int main(int argc, char** argv) {
    unsigned rank = 3;
    if (argc >= 2) {
        // std::atoi returns 0 on malformed input and may be negative; validate
        // before the unsigned assignment instead of wrapping to a huge value.
        int parsed = std::atoi(argv[1]);
        if (parsed < 2 || parsed > 6) {
            std::printf("Unsupported rank %d (expected 2..6)\n", parsed);
            return 1;
        }
        rank = static_cast<unsigned>(parsed);
    }
    unsigned width = rank * rank;
    int* clue = new int[width * width];
    for (unsigned i = 0; i < width * width; ++i) {
        int input;
        if (1 != std::scanf("%d", &input)) {
            input = 0;  // short/missing input reads as an empty cell
        }
        clue[i] = input;
    }
    Solve(rank, clue);
    // Unsigned loop variables: avoids the signed/unsigned comparison warnings
    // the original int counters produced against `width`.
    for (unsigned row = 0; row < width; ++row) {
        if (row % rank == 0) {
            std::printf("\n");
        }
        for (unsigned col = 0; col < width; ++col) {
            if (col % rank == 0) {
                std::printf(" ");
            }
            std::printf("%2d ", clue[row * width + col]);
        }
        std::printf("\n");
    }
    delete[] clue;  // was leaked in the original
    return 0;
}
|
149c6b58f49f981ded85c6a113eed71ff44f3864.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mask_constrain.hpp"
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
// One-time setup; all state initialization is delegated to the base class.
void mask_constrain_opt::init(){
    init_base();
}
// Caches the weight-tensor geometry and derives the per-group fan-in/fan-out.
// num/channel/height/width: tensor dims (out-channels, in-channels, kh, kw).
// Returns early without recomputing when reshape_base reports unchanged dims.
void mask_constrain_opt::reshape(int num, int channel, int height, int width){
    if(!reshape_base(num,channel,height,width)) return ;
    // Channels per input group / filters per output group. Assumes channel_
    // and num_ are divisible by ngroup_ -- TODO confirm upstream validation.
    group_in_ = channel_ / ngroup_;
    group_out_ = num_ / ngroup_;
}
// In-place triangular mask over a conv weight tensor: zeroes every element
// whose (kernel-x + kernel-y + input-group) sum reaches tn + sz - 1, where tn
// is the output-group index. Indexing assumes a square kernel (sz == kernel
// width == kernel height) -- the caller passes width_ for sz; confirm
// height_ == width_ upstream.
template <typename scalar_t>
__global__ void conv_mask_v5_kernel(const int nthreads, scalar_t* const weight,
    const int channel, const int sz, const int group_in, const int group_out){
    CUDA_KERNEL_LOOP(index, nthreads) {
        int tw = index % sz;                              // kernel x
        int th = (index / sz) % sz;                       // kernel y
        int tc = (index / sz / sz) % channel / group_in;  // input group
        int tn = index / sz / sz / channel / group_out;   // output group
        if (tw + th + tc >= tn + sz - 1)
            weight[index] = scalar_t(0);
    }
}
// Same per-group triangular mask as conv_mask_v5_kernel above, but with
// a strict '>' comparison: entries exactly on the boundary plane
// (tw + th + tc == tn + sz - 1) are kept instead of being zeroed.
// NOTE(review): assumes the tensor is laid out N x channel x sz x sz —
// confirm against the caller.
template <typename scalar_t>
__global__ void conv_mask_v6_kernel(const int nthreads, scalar_t* const weight,
const int channel, const int sz, const int group_in, const int group_out){
CUDA_KERNEL_LOOP(index, nthreads) {
int tw = index % sz; // innermost (fastest-varying) spatial index
int th = (index / sz) % sz; // second spatial index
int tc = (index / sz / sz) % channel / group_in; // channel-group index
int tn = index / sz / sz / channel / group_out; // leading-dim group index
if (tw + th + tc > tn + sz - 1)
weight[index] = scalar_t(0);
}
}
// Element-wise identity copy: output[i] = input[i]. The inner_shape
// parameter is accepted but never used, and this kernel is not launched
// anywhere in this translation unit (forward/backward both launch the
// mask kernels instead) — likely leftover scaffolding.
template <typename scalar_t>
__global__ void mask_constrain_forward_kernel(const int nthreads, const scalar_t* const input,
scalar_t * const output, const int inner_shape) {
CUDA_KERNEL_LOOP(index, nthreads) {
output[index] = input[index];
}
}
// Applies the triangular mask constraint to bottom_data in place.
// Re-derives the cached shape and group sizes from the tensor, then
// launches the v5 (>=) or v6 (>) mask kernel over every element.
// Only float/double tensors are dispatched (AT_DISPATCH_FLOATING_TYPES).
// NOTE(review): 'count' is a 32-bit int — a tensor with more than 2^31
// elements would overflow it; confirm upstream sizes. The kernel's 'sz'
// argument is width_ only, so width_ == height_ is presumably required.
void mask_constrain_opt::forward_cuda(at::Tensor bottom_data)
{
reshape(bottom_data.size(0), bottom_data.size(1), bottom_data.size(2), bottom_data.size(3));
int count = num_ * channel_ * width_ * height_; // one thread per element
AT_DISPATCH_FLOATING_TYPES(
bottom_data.scalar_type(), "mask_constrain_forward_cuda",
([&] {
timer_->start();
if(constrain_==5){
hipLaunchKernelGGL(( conv_mask_v5_kernel), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream_,
count, bottom_data.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}else{
hipLaunchKernelGGL(( conv_mask_v6_kernel), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream_,
count, bottom_data.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}
CUDA_POST_KERNEL_CHECK;
timer_->stop("kernel 1");
}
)
);
return ;
}
// Applies the same in-place mask to the gradient tensor, so masked-out
// weights receive no updates. Unlike forward_cuda there is no reshape()
// here: the shape cached by the last forward pass is reused —
// presumably top_diff always matches it; confirm with the caller.
void mask_constrain_opt::backward_cuda(at::Tensor top_diff)
{
int count= num_ * channel_ * width_ * height_; // one thread per element
AT_DISPATCH_FLOATING_TYPES(
top_diff.scalar_type(), "mask_constrain_backward_cuda",
([&] {
timer_->start();
if(constrain_==5){
hipLaunchKernelGGL(( conv_mask_v5_kernel), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream_,
count, top_diff.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}else{
hipLaunchKernelGGL(( conv_mask_v6_kernel), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream_,
count, top_diff.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}
CUDA_POST_KERNEL_CHECK;
timer_->stop("kernel 1");
}
)
);
return ;
} | 149c6b58f49f981ded85c6a113eed71ff44f3864.cu | #include "mask_constrain.hpp"
#include <curand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
// One-time setup: delegates entirely to the base-class initializer;
// this subclass has no extra state to configure here.
void mask_constrain_opt::init(){
init_base();
}
// Record the new blob shape and refresh the derived per-group sizes
// (channels per group and output maps per group). When reshape_base
// returns false (presumably: nothing changed), the cached group sizes
// are left as-is and no work is done.
void mask_constrain_opt::reshape(int num, int channel, int height, int width){
    if (reshape_base(num, channel, height, width)) {
        group_in_  = channel_ / ngroup_;
        group_out_ = num_ / ngroup_;
    }
}
// Zeroes, in place, the weight entries that violate the "v5" mask.
// Each flat index decomposes as (((n*channel + c)*sz + h)*sz + w); the
// channel and leading-dimension indices are further divided by their
// group sizes, so the constraint acts per group. An entry is cleared
// when tw + th + tc >= tn + sz - 1 — this strict variant also clears the
// boundary plane (the v6 kernel below keeps it).
// NOTE(review): assumes the tensor is laid out N x channel x sz x sz and
// that both spatial dims equal sz — confirm against the caller.
template <typename scalar_t>
__global__ void conv_mask_v5_kernel(const int nthreads, scalar_t* const weight,
const int channel, const int sz, const int group_in, const int group_out){
CUDA_KERNEL_LOOP(index, nthreads) {
int tw = index % sz; // innermost (fastest-varying) spatial index
int th = (index / sz) % sz; // second spatial index
int tc = (index / sz / sz) % channel / group_in; // channel-group index
int tn = index / sz / sz / channel / group_out; // leading-dim group index
if (tw + th + tc >= tn + sz - 1)
weight[index] = scalar_t(0);
}
}
// Same per-group triangular mask as conv_mask_v5_kernel above, but with
// a strict '>' comparison: entries exactly on the boundary plane
// (tw + th + tc == tn + sz - 1) are kept instead of being zeroed.
// NOTE(review): assumes the tensor is laid out N x channel x sz x sz —
// confirm against the caller.
template <typename scalar_t>
__global__ void conv_mask_v6_kernel(const int nthreads, scalar_t* const weight,
const int channel, const int sz, const int group_in, const int group_out){
CUDA_KERNEL_LOOP(index, nthreads) {
int tw = index % sz; // innermost (fastest-varying) spatial index
int th = (index / sz) % sz; // second spatial index
int tc = (index / sz / sz) % channel / group_in; // channel-group index
int tn = index / sz / sz / channel / group_out; // leading-dim group index
if (tw + th + tc > tn + sz - 1)
weight[index] = scalar_t(0);
}
}
// Element-wise identity copy: output[i] = input[i]. The inner_shape
// parameter is accepted but never used, and this kernel is not launched
// anywhere in this translation unit (forward/backward both launch the
// mask kernels instead) — likely leftover scaffolding.
template <typename scalar_t>
__global__ void mask_constrain_forward_kernel(const int nthreads, const scalar_t* const input,
scalar_t * const output, const int inner_shape) {
CUDA_KERNEL_LOOP(index, nthreads) {
output[index] = input[index];
}
}
// Applies the triangular mask constraint to bottom_data in place.
// Re-derives the cached shape and group sizes from the tensor, then
// launches the v5 (>=) or v6 (>) mask kernel over every element.
// Only float/double tensors are dispatched (AT_DISPATCH_FLOATING_TYPES).
// NOTE(review): 'count' is a 32-bit int — a tensor with more than 2^31
// elements would overflow it; confirm upstream sizes. The kernel's 'sz'
// argument is width_ only, so width_ == height_ is presumably required.
void mask_constrain_opt::forward_cuda(at::Tensor bottom_data)
{
reshape(bottom_data.size(0), bottom_data.size(1), bottom_data.size(2), bottom_data.size(3));
int count = num_ * channel_ * width_ * height_; // one thread per element
AT_DISPATCH_FLOATING_TYPES(
bottom_data.scalar_type(), "mask_constrain_forward_cuda",
([&] {
timer_->start();
if(constrain_==5){
conv_mask_v5_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_>>>
(count, bottom_data.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}else{
conv_mask_v6_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_>>>
(count, bottom_data.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}
CUDA_POST_KERNEL_CHECK;
timer_->stop("kernel 1");
}
)
);
return ;
}
// Applies the same in-place mask to the gradient tensor, so masked-out
// weights receive no updates. Unlike forward_cuda there is no reshape()
// here: the shape cached by the last forward pass is reused —
// presumably top_diff always matches it; confirm with the caller.
void mask_constrain_opt::backward_cuda(at::Tensor top_diff)
{
int count= num_ * channel_ * width_ * height_; // one thread per element
AT_DISPATCH_FLOATING_TYPES(
top_diff.scalar_type(), "mask_constrain_backward_cuda",
([&] {
timer_->start();
if(constrain_==5){
conv_mask_v5_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_>>>
(count, top_diff.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}else{
conv_mask_v6_kernel<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_>>>
(count, top_diff.data_ptr<scalar_t>(), channel_, width_, group_in_, group_out_);
}
CUDA_POST_KERNEL_CHECK;
timer_->stop("kernel 1");
}
)
);
return ;
}
12416eaf1ef8b22ccaae1775197f61be33333914.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv.cu, normal z -> s, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_s
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "strsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ float shared_data[];
/******************************************************************************/
// Thin __global__ wrapper for the no-transpose triangular solve: all
// parameters (block/tile sizes, uplo/trans/diag, and 'flag', which tells
// the device code whether x already holds partial results from a prior
// recursive step) are compile-time template arguments, so the dispatcher
// below instantiates one kernel per combination. Launched as a single
// block with n*sizeof(float) dynamic shared memory.
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
strsv_notrans_kernel_outplace(
int n,
const float * __restrict__ A, int lda,
float *b, int incb,
float *x)
{
strsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
// Thin __global__ wrapper for the (conj-)transpose triangular solve;
// mirrors strsv_notrans_kernel_outplace above but delegates to the
// transpose device routine. All solver parameters are compile-time
// template arguments; launched as a single block with n floats of
// dynamic shared memory.
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
strsv_trans_kernel_outplace(
int n,
const float * __restrict__ A, int lda,
float *b, int incb,
float *x)
{
strsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
// Single-step out-of-place triangular solve: writes the solution of
// op(A)*x = b into x without overwriting b. Validates the arguments,
// then selects one of 24 compile-time kernel specializations
// (trans x uplo x diag x flag) and launches it as ONE thread block with
// n*sizeof(float) dynamic shared memory. flag != 0 tells the kernel
// that x already holds partial results accumulated by a previous step of
// magmablas_strsv_recursive_outofplace.
extern "C" void
magmablas_strsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr b, magma_int_t incb,
magmaFloat_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
// One block only; the templated kernels expect the whole vector to fit
// in the n-float shared-memory workspace.
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(float);
// Case 1: op(A) = A — non-transposed kernels.
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
// Case 2: op(A) = A^T — transpose kernels (different block/tile sizes).
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
// Case 3: op(A) = A^H — same kernels with conjugate-transpose tag.
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
}
/******************************************************************************/
/*
README: flag decides if the strsv_outplace see an updated x or not. 0: No; other: Yes
In recursive, flag must be nonzero except the 1st call
*/
// Blocked recursive driver: processes A in NB-sized diagonal blocks.
// For each block it first folds the part of x solved so far into the
// running right-hand side via a GEMV update, then solves the diagonal
// block with magmablas_strsv_outofplace. The trailing argument 'i' is
// the flag: 0 only on the first block, nonzero afterwards, telling the
// kernel that x already holds prior partial results. Upper/NoTrans and
// Lower/Trans sweep from the bottom-right corner; the other cases sweep
// from the top-left. x must be zero-initialized by the caller.
extern "C" void
magmablas_strsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr b, magma_int_t incb,
magmaFloat_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_slaset( MagmaFull, n, incb, MAGMA_S_ZERO, MAGMA_S_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb; // walk blocks upward from the bottom-right corner
//assume x_array contains zero elements, magmablas_sgemv will cause slow down
magma_sgemv( MagmaNoTrans, jb, i, MAGMA_S_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_S_ONE, x+col, 1, queue );
}
else
{
col = i; // walk blocks downward from the top-left corner
magma_sgemv( MagmaNoTrans, jb, i, MAGMA_S_ONE, A(col, 0), lda,
x, 1, MAGMA_S_ONE, x+col, 1, queue );
}
magmablas_strsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb; // transpose case mirrors the sweep direction
magma_sgemv( MagmaConjTrans, i, jb, MAGMA_S_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_S_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_sgemv( MagmaConjTrans, i, jb, MAGMA_S_ONE, A(0, col), lda, x, 1, MAGMA_S_ONE, x+col, 1, queue );
}
magmablas_strsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
/***************************************************************************//**
Purpose
-------
strsv solves one of the matrix equations on gpu
op(A)*x = B, or
x*op(A) = B,
where alpha is a scalar, X and B are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA REAL array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db REAL array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
// Top-level in-place solve of op(A)*x = b, overwriting b with x.
// Allocates an out-of-place workspace dx, zero-initializes it (the
// recursive driver accumulates into it), runs the blocked recursive
// solver, and copies the result back over b before freeing dx.
//
// Fix: the magma_smalloc return value was previously ignored — a failed
// allocation left dx == NULL and crashed the downstream kernels. Report
// the failure through magma_xerbla and bail out instead.
extern "C" void
magmablas_strsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr db, magma_int_t incb,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaFloat_ptr dx = NULL;
if (MAGMA_SUCCESS != magma_smalloc( &dx, size_x )) {
magma_int_t info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return;
}
magmablas_slaset( MagmaFull, n, 1, MAGMA_S_ZERO, MAGMA_S_ZERO, dx, n, queue );
magmablas_strsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_slacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
| 12416eaf1ef8b22ccaae1775197f61be33333914.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv.cu, normal z -> s, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_s
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "strsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ float shared_data[];
/******************************************************************************/
// Thin __global__ wrapper for the no-transpose triangular solve: all
// parameters (block/tile sizes, uplo/trans/diag, and 'flag', which tells
// the device code whether x already holds partial results from a prior
// recursive step) are compile-time template arguments, so the dispatcher
// below instantiates one kernel per combination. Launched as a single
// block with n*sizeof(float) dynamic shared memory.
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
strsv_notrans_kernel_outplace(
int n,
const float * __restrict__ A, int lda,
float *b, int incb,
float *x)
{
strsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
// Thin __global__ wrapper for the (conj-)transpose triangular solve;
// mirrors strsv_notrans_kernel_outplace above but delegates to the
// transpose device routine. All solver parameters are compile-time
// template arguments; launched as a single block with n floats of
// dynamic shared memory.
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
strsv_trans_kernel_outplace(
int n,
const float * __restrict__ A, int lda,
float *b, int incb,
float *x)
{
strsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
// Single-step out-of-place triangular solve: writes the solution of
// op(A)*x = b into x without overwriting b. Validates the arguments,
// then selects one of 24 compile-time kernel specializations
// (trans x uplo x diag x flag) and launches it as ONE thread block with
// n*sizeof(float) dynamic shared memory. flag != 0 tells the kernel
// that x already holds partial results accumulated by a previous step of
// magmablas_strsv_recursive_outofplace.
extern "C" void
magmablas_strsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr b, magma_int_t incb,
magmaFloat_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
// One block only; the templated kernels expect the whole vector to fit
// in the n-float shared-memory workspace.
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(float);
// Case 1: op(A) = A — non-transposed kernels.
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
// Case 2: op(A) = A^T — transpose kernels (different block/tile sizes).
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
// Case 3: op(A) = A^H — same kernels with conjugate-transpose tag.
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
strsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
}
/******************************************************************************/
/*
README: flag decides if the strsv_outplace see an updated x or not. 0: No; other: Yes
In recursive, flag must be nonzero except the 1st call
*/
// Blocked recursive driver: processes A in NB-sized diagonal blocks.
// For each block it first folds the part of x solved so far into the
// running right-hand side via a GEMV update, then solves the diagonal
// block with magmablas_strsv_outofplace. The trailing argument 'i' is
// the flag: 0 only on the first block, nonzero afterwards, telling the
// kernel that x already holds prior partial results. Upper/NoTrans and
// Lower/Trans sweep from the bottom-right corner; the other cases sweep
// from the top-left. x must be zero-initialized by the caller.
extern "C" void
magmablas_strsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloat_const_ptr A, magma_int_t lda,
magmaFloat_ptr b, magma_int_t incb,
magmaFloat_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_slaset( MagmaFull, n, incb, MAGMA_S_ZERO, MAGMA_S_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb; // walk blocks upward from the bottom-right corner
//assume x_array contains zero elements, magmablas_sgemv will cause slow down
magma_sgemv( MagmaNoTrans, jb, i, MAGMA_S_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_S_ONE, x+col, 1, queue );
}
else
{
col = i; // walk blocks downward from the top-left corner
magma_sgemv( MagmaNoTrans, jb, i, MAGMA_S_ONE, A(col, 0), lda,
x, 1, MAGMA_S_ONE, x+col, 1, queue );
}
magmablas_strsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb; // transpose case mirrors the sweep direction
magma_sgemv( MagmaConjTrans, i, jb, MAGMA_S_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_S_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_sgemv( MagmaConjTrans, i, jb, MAGMA_S_ONE, A(0, col), lda, x, 1, MAGMA_S_ONE, x+col, 1, queue );
}
magmablas_strsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
/***************************************************************************//**
Purpose
-------
strsv solves one of the matrix equations on gpu
op(A)*x = B, or
x*op(A) = B,
where alpha is a scalar, X and B are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA REAL array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db REAL array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
extern "C" void
magmablas_strsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr db, magma_int_t incb,
magma_queue_t queue)
{
    // Device workspace that receives the out-of-place solution x.
    // Sized n * incb to account for a strided right-hand side.
    magma_int_t size_x = n * incb;
    magmaFloat_ptr dx=NULL;
    // NOTE(review): the return status of magma_smalloc is not checked — on
    // allocation failure dx stays NULL and is passed to the solver below.
    // Confirm callers guarantee sufficient device memory, or add an error path.
    magma_smalloc( &dx, size_x );
    // Zero the workspace so the recursive out-of-place solver can write into it.
    magmablas_slaset( MagmaFull, n, 1, MAGMA_S_ZERO, MAGMA_S_ZERO, dx, n, queue );
    // Solve op(A)*x = b with the result in dx, leaving db untouched until the copy.
    magmablas_strsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
    // Copy the solution back over b.
    // NOTE(review): this copy (and the slaset above) treats dx and db as
    // contiguous length-n column vectors with leading dimension n — it appears
    // to assume incb == 1; verify behavior for strided b.
    magmablas_slacpy( MagmaFull, n, 1, dx, n, db, n, queue );
    magma_free( dx );
}
|
9a8a8dfbdd63db1206034a596ac434db06173830.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <cfloat>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/margin_inner_product_layer.hpp"
namespace caffe {
// L2-normalizes each of the `nthreads` (= N_) weight rows in place; one
// thread handles one full row of length K_.
//
// Fix: guard the division so an all-zero row no longer divides by zero and
// writes NaNs back into the weights permanently; such a row is left unchanged.
// Also renames the misspelled local `sum_sqaure`.
template <typename Dtype>
__global__ void Weight_norm_gpu(int nthreads, const int K_,
Dtype* weight) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype* row = weight + index * K_;
    Dtype sum_square = 0.;
    for (int i = 0; i < K_; i++) {
      sum_square += row[i] * row[i];
    }
    const Dtype norm = sqrt(sum_square);
    if (norm > 0) {  // skip degenerate all-zero rows instead of emitting NaN
      for (int i = 0; i < K_; i++) {
        row[i] = row[i] / norm;
      }
    }
  }
}
// Computes the Euclidean norm of each input sample: one thread per row of
// the M_ x K_ bottom blob, result written to x_norm[row].
template <typename Dtype>
__global__ void Compute_bottom_norm_gpu(int nthreads, const int K_,
const Dtype* bottom, Dtype* x_norm) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype* row = bottom + index * K_;
    Dtype acc = 0.;
    for (int k = 0; k < K_; ++k) {
      acc += row[k] * row[k];
    }
    x_norm[index] = sqrt(acc);
  }
}
// Converts the raw inner products x'w (already stored in cos_theta, one
// entry per sample/class pair of the M_ x N_ matrix) into cos(theta) by
// dividing by the sample norm.  Near-zero samples fall back to cos = 1.
template <typename Dtype>
__global__ void Compute_cos_theta_gpu(int nthreads, const int N_,
const Dtype* x_norm, Dtype* cos_theta) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int sample = index / N_;  // row of the M_ x N_ matrix
    const Dtype norm = x_norm[sample];
    cos_theta[index] = (norm > 1e-3) ? cos_theta[index] / norm : (Dtype)1;
  }
}
// Element-wise |cos(theta)| - 0.5; the caller reduces this to its sign
// afterwards via caffe_gpu_sign.
template <typename Dtype>
__global__ void Compute_sign_1_gpu(int nthreads, const Dtype* cos_theta, Dtype* sign_1) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype c = cos_theta[index];
    sign_1[index] = abs(c) - (Dtype)0.5;
  }
}
// Element-wise sign_2 = sign_0 * (1 + sign_1) - 2.
template <typename Dtype>
__global__ void Compute_sign_2_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_1, Dtype* sign_2) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype s0 = sign_0[index];
    const Dtype s1 = sign_1[index];
    sign_2[index] = s0 * ((Dtype)1. + s1) - (Dtype)2.;
  }
}
// Element-wise sign_3 = sign_0 * (2 * cos^2(theta) - 1); the caller reduces
// this to its sign afterwards via caffe_gpu_sign.
template <typename Dtype>
__global__ void Compute_sign_3_gpu(int nthreads, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* sign_3) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype s0 = sign_0[index];
    sign_3[index] = s0 * ((Dtype)2. * cos_theta_quadratic[index] - (Dtype)1.);
  }
}
// Element-wise sign_4 = 2 * sign_0 + sign_3 - 3.
template <typename Dtype>
__global__ void Compute_sign_4_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_3, Dtype* sign_4) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    sign_4[index] = sign_3[index] + (Dtype)2. * sign_0[index] - (Dtype)3.;
  }
}
// Applies the double-angle margin to the target-class logit only.
// top is M_ x N_ (samples x classes); entry (i, label[i]) becomes
//   (lambda * top + |x_i| * (2 * sign_0 * cos^2(theta) - 1)) / (1 + lambda);
// all other entries keep the plain inner product.
template <typename Dtype>
__global__ void Margin_double_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* top) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // the label[i]_th top_data
    const int i = index / N_;  // sample row
    const int j = index % N_;  // class column
    const int label_value = static_cast<int>(label[i]);
    if (label_value == j) {
      top[index] *= lambda;
      if (x_norm[i] > 1e-3)
      {
        top[index] += x_norm[i] * ((Dtype)2. * sign_0[index] * cos_theta_quadratic[index] -
        (Dtype)1.);
        top[index] /= ((Dtype)1. + lambda);
      }
      else
        // Near-zero sample: the angular term is ill-defined, so the target
        // logit is zeroed.  NOTE(review): the lambda-scaled value computed
        // above is discarded in this branch — confirm that is intended.
        top[index] = (Dtype)0.;
    }
  }
}
// Applies the triple-angle margin to the target-class logit only:
//   top(i, label[i]) = (lambda * top + |x_i| * psi) / (1 + lambda),
// with psi = sign_1 * (4 cos^3 - 3 cos) + sign_2.
// All other entries keep the plain inner product.
template <typename Dtype>
__global__ void Margin_triple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2,
const Dtype* cos_theta, const Dtype* cos_theta_cubic,
Dtype* top) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int row = index / N_;
    const int col = index % N_;
    // Only the ground-truth class entry is re-margined.
    if (static_cast<int>(label[row]) != col) continue;
    const Dtype psi = sign_1[index] * ((Dtype)4. * cos_theta_cubic[index] -
        (Dtype)3. * cos_theta[index]) + sign_2[index];
    top[index] *= lambda;
    top[index] += x_norm[row] * psi;
    top[index] /= ((Dtype)1. + lambda);
  }
}
// Applies the quadruple-angle margin to the target-class logit only:
//   top(i, label[i]) = (lambda * top + |x_i| * psi) / (1 + lambda),
// with psi = sign_3 * (8 cos^4 - 8 cos^2 + 1) + sign_4.
// All other entries keep the plain inner product.
template <typename Dtype>
__global__ void Margin_quadruple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta_quadratic, const Dtype* cos_theta_quartic,
Dtype* top) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int row = index / N_;
    const int col = index % N_;
    // Only the ground-truth class entry is re-margined.
    if (static_cast<int>(label[row]) != col) continue;
    const Dtype psi = sign_3[index] * ((Dtype)8. * cos_theta_quartic[index] -
        (Dtype)8. * cos_theta_quadratic[index] + (Dtype)1.) + sign_4[index];
    top[index] *= lambda;
    top[index] += x_norm[row] * psi;
    top[index] /= ((Dtype)1. + lambda);
  }
}
// Gradient of the double-margin forward pass w.r.t. the bottom data.
// One thread per bottom element (i, j), i in [0, M_), j in [0, K_);
// each thread accumulates contributions from all N_ classes.
template <typename Dtype>
__global__ void Margin_bottom_double_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta,
const Dtype* cos_theta_quadratic, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / K_;  // sample row
    const int j = index % K_;  // feature column
    bottom_diff[index] = (Dtype)0.;
    const int label_value = static_cast<int>(label[i]);
    for (int n = 0; n < N_; n++) {
      if (label_value != n) {
        if (x_norm[i] > 1e-3)
          // Non-target class: plain inner-product gradient.
          bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
        else
          // x_norm[i] does not depend on n, so once this branch is taken it
          // is taken for every n and the final gradient is exactly zero.
          bottom_diff[index] = (Dtype)0.;
      } else {
        if (x_norm[i] > 1e-3)
        {
          // Coefficients applied to the weight and input directions for the
          // margin term, renormalized to unit length before blending.
          Dtype coeff_w = (Dtype)4. * sign_0[i * N_ + n] * cos_theta[i * N_ + n];
          Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)2. * sign_0[i * N_ + n] *
          cos_theta_quadratic[i * N_ + n] + (Dtype)1.);
          Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
          coeff_w = coeff_w / coeff_norm;
          coeff_x = coeff_x / coeff_norm;
          // Blend margin gradient and plain gradient with weights
          // 1/(1+lambda) and lambda/(1+lambda), mirroring the forward pass.
          bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
          (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
          bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
        }
        else
          bottom_diff[index] = (Dtype)0.;
      }
    }
  }
}
// Gradient of the triple-margin forward pass w.r.t. the bottom data.
// One thread per bottom element (i, j); accumulates over all N_ classes.
// NOTE(review): unlike the double-margin version there is no x_norm guard
// here, so 1/x_norm[i] is taken unconditionally — confirm inputs cannot be
// (near-)zero.
template <typename Dtype>
__global__ void Margin_bottom_triple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / K_;  // sample row
    const int j = index % K_;  // feature column
    bottom_diff[index] = (Dtype)0.;
    const int label_value = static_cast<int>(label[i]);
    for (int n = 0; n < N_; n++) {
      if (label_value != n) {
        // Non-target class: plain inner-product gradient.
        bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
      } else {
        // Coefficients applied to the weight and input directions for the
        // margin term, renormalized to unit length before blending.
        Dtype coeff_w = sign_1[i * N_ + n] * ((Dtype)12. * cos_theta_quadratic[i * N_ + n] - (Dtype)3.);
        Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)8. * sign_1[i * N_ + n] * cos_theta_cubic[i * N_ + n] -
        sign_2[i * N_ + n]);
        // NOTE(review): coeff_norm is zero if both coefficients vanish,
        // which would produce 0/0 below — confirm this cannot occur.
        Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
        coeff_w = coeff_w / coeff_norm;
        coeff_x = coeff_x / coeff_norm;
        // Blend margin gradient and plain gradient, mirroring the forward pass.
        bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
        (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
        bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
      }
    }
  }
}
// Gradient of the quadruple-margin forward pass w.r.t. the bottom data.
// One thread per bottom element (i, j); accumulates over all N_ classes.
// NOTE(review): 1/x_norm[i] is taken unconditionally (no near-zero guard as
// in the double-margin version) — confirm inputs cannot be (near-)zero.
template <typename Dtype>
__global__ void Margin_bottom_quadruple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, const Dtype* cos_theta_quartic, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / K_;  // sample row
    const int j = index % K_;  // feature column
    bottom_diff[index] = (Dtype)0.;
    const int label_value = static_cast<int>(label[i]);
    for (int n = 0; n < N_; n++) {
      if (label_value != n) {
        // Non-target class: plain inner-product gradient.
        bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
      } else {
        // Coefficients applied to the weight and input directions for the
        // margin term, renormalized to unit length before blending.
        Dtype coeff_w = sign_3[i * N_ + n] * ((Dtype)32. * cos_theta_cubic[i * N_ + n] - (Dtype)16. * cos_theta[i * N_ + n]);
        Dtype coeff_x = - (Dtype)1./ x_norm[i] * (sign_3[i * N_ + n] * ((Dtype)24. * cos_theta_quartic[i * N_ + n] -
        (Dtype)8. * cos_theta_quadratic[i * N_ + n] - 1) - sign_4[i * N_ + n]);
        Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
        coeff_w = coeff_w / coeff_norm;
        coeff_x = coeff_x / coeff_norm;
        // Blend margin gradient and plain gradient, mirroring the forward pass.
        bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
        (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
        bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
      }
    }
  }
}
// Forward pass: inner product x'w with an angular margin applied to each
// sample's target-class logit.  lambda is annealed per iteration as
// base * (1 + gamma * iter)^(-power), floored at lambda_min, and exported
// through top[1] for monitoring.
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
  iter_ += (Dtype)1.;
  Dtype base_ = this->layer_param_.margin_inner_product_param().base();
  Dtype gamma_ = this->layer_param_.margin_inner_product_param().gamma();
  Dtype power_ = this->layer_param_.margin_inner_product_param().power();
  Dtype lambda_min_ = this->layer_param_.margin_inner_product_param().lambda_min();
  // NOTE(review): powf evaluates in single precision even when Dtype is
  // double — confirm the precision loss is acceptable here.
  lambda_ = base_ * powf(((Dtype)1. + gamma_ * iter_), -power_);
  lambda_ = max(lambda_, lambda_min_);
  top[1]->mutable_cpu_data()[0] = lambda_;
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  /************************* normalize weight *************************/
  // One thread per weight row; rows are L2-normalized in place.
  int nthreads = N_;
  hipLaunchKernelGGL(( Weight_norm_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
  dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_,
  this->blobs_[0]->mutable_gpu_data());
  /************************* common variables *************************/
  // x_norm_ = |x|
  nthreads = M_;
  hipLaunchKernelGGL(( Compute_bottom_norm_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
  dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom_data,
  x_norm_.mutable_gpu_data());
  nthreads = M_ * N_;
  // cos_theta = x'w / |x|  (inner products first, then divided in place)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
  bottom_data, weight, (Dtype)0., cos_theta_.mutable_gpu_data());
  hipLaunchKernelGGL(( Compute_cos_theta_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
  dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, x_norm_.gpu_data(), cos_theta_.mutable_gpu_data());
  // sign_0 = sign(cos_theta)
  caffe_gpu_sign(M_ * N_, cos_theta_.gpu_data(), sign_0_.mutable_gpu_data());
  /************************* optional variables *************************/
  // Precompute the cos-powers and sign blobs required by the chosen margin.
  switch (type_) {
  case MarginInnerProductParameter_MarginType_SINGLE:
    break;
  case MarginInnerProductParameter_MarginType_DOUBLE:
    // cos_theta_quadratic
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
    break;
  case MarginInnerProductParameter_MarginType_TRIPLE:
    // cos_theta_quadratic && cos_theta_cubic
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
    // sign_1 = sign(abs(cos_theta) - 0.5)
    hipLaunchKernelGGL(( Compute_sign_1_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, cos_theta_.gpu_data(), sign_1_.mutable_gpu_data())<br/>;
    caffe_gpu_sign(M_ * N_, sign_1_.gpu_data(), sign_1_.mutable_gpu_data());
    // sign_2 = sign_0 * (1 + sign_1) - 2
    hipLaunchKernelGGL(( Compute_sign_2_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(),
    sign_1_.gpu_data(), sign_2_.mutable_gpu_data());
    break;
  case MarginInnerProductParameter_MarginType_QUADRUPLE:
    // cos_theta_quadratic && cos_theta_cubic && cos_theta_quartic
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)4., cos_theta_quartic_.mutable_gpu_data());
    // sign_3 = sign_0 * sign(2 * cos_theta_quadratic_ - 1)
    hipLaunchKernelGGL(( Compute_sign_3_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(),
    sign_3_.mutable_gpu_data());
    caffe_gpu_sign(M_ * N_, sign_3_.gpu_data(), sign_3_.mutable_gpu_data());
    // sign_4 = 2 * sign_0 + sign_3 - 3
    hipLaunchKernelGGL(( Compute_sign_4_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(),
    sign_3_.gpu_data(), sign_4_.mutable_gpu_data());
    break;
  default:
    LOG(FATAL) << "Unknown margin type.";
  }
  /************************* Forward *************************/
  // Start from the plain inner product, then patch target-class entries.
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
  bottom_data, weight, (Dtype)0., top_data);
  switch (type_) {
  case MarginInnerProductParameter_MarginType_SINGLE:
    break;
  case MarginInnerProductParameter_MarginType_DOUBLE:
    // caffe_gpu_memcpy(M_ * N_, cos_theta_.gpu_data(), top_data);
    hipLaunchKernelGGL(( Margin_double_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(),
    sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), top_data);
    break;
  case MarginInnerProductParameter_MarginType_TRIPLE:
    hipLaunchKernelGGL(( Margin_triple_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_1_.gpu_data(),
    sign_2_.gpu_data(), cos_theta_.gpu_data(),
    cos_theta_cubic_.gpu_data(), top_data);
    break;
  case MarginInnerProductParameter_MarginType_QUADRUPLE:
    hipLaunchKernelGGL(( Margin_quadruple_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
    dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_3_.gpu_data(),
    sign_4_.gpu_data(), cos_theta_quadratic_.gpu_data(),
    cos_theta_quartic_.gpu_data(), top_data);
    break;
  default:
    LOG(FATAL) << "Unknown margin type.";
  }
}
// Backward pass.  The weight gradient uses the plain (un-margined)
// inner-product formulation; the bottom gradient dispatches to the
// margin-specific kernels.
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  if (this->param_propagate_down_[0]) {
    // Gradient with respect to weight
    // NOTE: beta = (Dtype)1., so this accumulates into the existing diff.
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
    top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // Gradient with respect to bottom data
    // One thread per bottom element (M_ x K_).
    int nthreads = M_ * K_;
    switch (type_) {
    case MarginInnerProductParameter_MarginType_SINGLE:
      // No margin: the gradient is a plain GEMM with the weights.
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
      top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
      bottom[0]->mutable_gpu_diff());
      break;
    case MarginInnerProductParameter_MarginType_DOUBLE:
      hipLaunchKernelGGL(( Margin_bottom_double_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
      x_norm_.gpu_data(), sign_0_.gpu_data(),
      cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
      bottom_diff);
      break;
    case MarginInnerProductParameter_MarginType_TRIPLE:
      hipLaunchKernelGGL(( Margin_bottom_triple_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
      x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(),
      cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(),
      bottom_diff);
      break;
    case MarginInnerProductParameter_MarginType_QUADRUPLE:
      hipLaunchKernelGGL(( Margin_bottom_quadruple_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
      x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(),
      cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
      cos_theta_cubic_.gpu_data(), cos_theta_quartic_.gpu_data(),
      bottom_diff);
      break;
    default:
      LOG(FATAL) << "Unknown margin type.";
    }
  }
}
// Instantiates Forward_gpu/Backward_gpu for float and double.
INSTANTIATE_LAYER_GPU_FUNCS(MarginInnerProductLayer);
}  // namespace caffe
| 9a8a8dfbdd63db1206034a596ac434db06173830.cu | #include <vector>
#include <cfloat>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/margin_inner_product_layer.hpp"
namespace caffe {
// L2-normalizes each of the `nthreads` (= N_) weight rows in place; one
// thread handles one full row of length K_.
//
// Fix: guard the division so an all-zero row no longer divides by zero and
// writes NaNs back into the weights permanently; such a row is left unchanged.
// Also renames the misspelled local `sum_sqaure`.
template <typename Dtype>
__global__ void Weight_norm_gpu(int nthreads, const int K_,
Dtype* weight) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype* row = weight + index * K_;
    Dtype sum_square = 0.;
    for (int i = 0; i < K_; i++) {
      sum_square += row[i] * row[i];
    }
    const Dtype norm = sqrt(sum_square);
    if (norm > 0) {  // skip degenerate all-zero rows instead of emitting NaN
      for (int i = 0; i < K_; i++) {
        row[i] = row[i] / norm;
      }
    }
  }
}
// Computes the Euclidean norm of each input sample: one thread per row of
// the M_ x K_ bottom blob, result written to x_norm[row].
template <typename Dtype>
__global__ void Compute_bottom_norm_gpu(int nthreads, const int K_,
const Dtype* bottom, Dtype* x_norm) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype* row = bottom + index * K_;
    Dtype acc = 0.;
    for (int k = 0; k < K_; ++k) {
      acc += row[k] * row[k];
    }
    x_norm[index] = sqrt(acc);
  }
}
// Converts the raw inner products x'w (already stored in cos_theta, one
// entry per sample/class pair of the M_ x N_ matrix) into cos(theta) by
// dividing by the sample norm.  Near-zero samples fall back to cos = 1.
template <typename Dtype>
__global__ void Compute_cos_theta_gpu(int nthreads, const int N_,
const Dtype* x_norm, Dtype* cos_theta) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int sample = index / N_;  // row of the M_ x N_ matrix
    const Dtype norm = x_norm[sample];
    cos_theta[index] = (norm > 1e-3) ? cos_theta[index] / norm : (Dtype)1;
  }
}
// Element-wise |cos(theta)| - 0.5; the caller reduces this to its sign
// afterwards via caffe_gpu_sign.
template <typename Dtype>
__global__ void Compute_sign_1_gpu(int nthreads, const Dtype* cos_theta, Dtype* sign_1) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype c = cos_theta[index];
    sign_1[index] = abs(c) - (Dtype)0.5;
  }
}
// Element-wise sign_2 = sign_0 * (1 + sign_1) - 2.
template <typename Dtype>
__global__ void Compute_sign_2_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_1, Dtype* sign_2) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype s0 = sign_0[index];
    const Dtype s1 = sign_1[index];
    sign_2[index] = s0 * ((Dtype)1. + s1) - (Dtype)2.;
  }
}
// Element-wise sign_3 = sign_0 * (2 * cos^2(theta) - 1); the caller reduces
// this to its sign afterwards via caffe_gpu_sign.
template <typename Dtype>
__global__ void Compute_sign_3_gpu(int nthreads, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* sign_3) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const Dtype s0 = sign_0[index];
    sign_3[index] = s0 * ((Dtype)2. * cos_theta_quadratic[index] - (Dtype)1.);
  }
}
// Element-wise sign_4 = 2 * sign_0 + sign_3 - 3.
template <typename Dtype>
__global__ void Compute_sign_4_gpu(int nthreads, const Dtype* sign_0,
const Dtype* sign_3, Dtype* sign_4) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    sign_4[index] = sign_3[index] + (Dtype)2. * sign_0[index] - (Dtype)3.;
  }
}
// Applies the double-angle margin to the target-class logit only.
// top is M_ x N_ (samples x classes); entry (i, label[i]) becomes
//   (lambda * top + |x_i| * (2 * sign_0 * cos^2(theta) - 1)) / (1 + lambda);
// all other entries keep the plain inner product.
template <typename Dtype>
__global__ void Margin_double_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_0,
const Dtype* cos_theta_quadratic, Dtype* top) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // the label[i]_th top_data
    const int i = index / N_;  // sample row
    const int j = index % N_;  // class column
    const int label_value = static_cast<int>(label[i]);
    if (label_value == j) {
      top[index] *= lambda;
      if (x_norm[i] > 1e-3)
      {
        top[index] += x_norm[i] * ((Dtype)2. * sign_0[index] * cos_theta_quadratic[index] -
        (Dtype)1.);
        top[index] /= ((Dtype)1. + lambda);
      }
      else
        // Near-zero sample: the angular term is ill-defined, so the target
        // logit is zeroed.  NOTE(review): the lambda-scaled value computed
        // above is discarded in this branch — confirm that is intended.
        top[index] = (Dtype)0.;
    }
  }
}
// Applies the triple-angle margin to the target-class logit only:
//   top(i, label[i]) = (lambda * top + |x_i| * psi) / (1 + lambda),
// with psi = sign_1 * (4 cos^3 - 3 cos) + sign_2.
// All other entries keep the plain inner product.
template <typename Dtype>
__global__ void Margin_triple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2,
const Dtype* cos_theta, const Dtype* cos_theta_cubic,
Dtype* top) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int row = index / N_;
    const int col = index % N_;
    // Only the ground-truth class entry is re-margined.
    if (static_cast<int>(label[row]) != col) continue;
    const Dtype psi = sign_1[index] * ((Dtype)4. * cos_theta_cubic[index] -
        (Dtype)3. * cos_theta[index]) + sign_2[index];
    top[index] *= lambda;
    top[index] += x_norm[row] * psi;
    top[index] /= ((Dtype)1. + lambda);
  }
}
// Applies the quadruple-angle margin to the target-class logit only:
//   top(i, label[i]) = (lambda * top + |x_i| * psi) / (1 + lambda),
// with psi = sign_3 * (8 cos^4 - 8 cos^2 + 1) + sign_4.
// All other entries keep the plain inner product.
template <typename Dtype>
__global__ void Margin_quadruple_forward_gpu(int nthreads, const int N_, Dtype lambda,
const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta_quadratic, const Dtype* cos_theta_quartic,
Dtype* top) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int row = index / N_;
    const int col = index % N_;
    // Only the ground-truth class entry is re-margined.
    if (static_cast<int>(label[row]) != col) continue;
    const Dtype psi = sign_3[index] * ((Dtype)8. * cos_theta_quartic[index] -
        (Dtype)8. * cos_theta_quadratic[index] + (Dtype)1.) + sign_4[index];
    top[index] *= lambda;
    top[index] += x_norm[row] * psi;
    top[index] /= ((Dtype)1. + lambda);
  }
}
// Gradient of the double-margin forward pass w.r.t. the bottom data.
// One thread per bottom element (i, j), i in [0, M_), j in [0, K_);
// each thread accumulates contributions from all N_ classes.
template <typename Dtype>
__global__ void Margin_bottom_double_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta,
const Dtype* cos_theta_quadratic, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / K_;  // sample row
    const int j = index % K_;  // feature column
    bottom_diff[index] = (Dtype)0.;
    const int label_value = static_cast<int>(label[i]);
    for (int n = 0; n < N_; n++) {
      if (label_value != n) {
        if (x_norm[i] > 1e-3)
          // Non-target class: plain inner-product gradient.
          bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
        else
          // x_norm[i] does not depend on n, so once this branch is taken it
          // is taken for every n and the final gradient is exactly zero.
          bottom_diff[index] = (Dtype)0.;
      } else {
        if (x_norm[i] > 1e-3)
        {
          // Coefficients applied to the weight and input directions for the
          // margin term, renormalized to unit length before blending.
          Dtype coeff_w = (Dtype)4. * sign_0[i * N_ + n] * cos_theta[i * N_ + n];
          Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)2. * sign_0[i * N_ + n] *
          cos_theta_quadratic[i * N_ + n] + (Dtype)1.);
          Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
          coeff_w = coeff_w / coeff_norm;
          coeff_x = coeff_x / coeff_norm;
          // Blend margin gradient and plain gradient with weights
          // 1/(1+lambda) and lambda/(1+lambda), mirroring the forward pass.
          bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
          (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
          bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
        }
        else
          bottom_diff[index] = (Dtype)0.;
      }
    }
  }
}
// Gradient of the triple-margin forward pass w.r.t. the bottom data.
// One thread per bottom element (i, j); accumulates over all N_ classes.
// NOTE(review): unlike the double-margin version there is no x_norm guard
// here, so 1/x_norm[i] is taken unconditionally — confirm inputs cannot be
// (near-)zero.
template <typename Dtype>
__global__ void Margin_bottom_triple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / K_;  // sample row
    const int j = index % K_;  // feature column
    bottom_diff[index] = (Dtype)0.;
    const int label_value = static_cast<int>(label[i]);
    for (int n = 0; n < N_; n++) {
      if (label_value != n) {
        // Non-target class: plain inner-product gradient.
        bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
      } else {
        // Coefficients applied to the weight and input directions for the
        // margin term, renormalized to unit length before blending.
        Dtype coeff_w = sign_1[i * N_ + n] * ((Dtype)12. * cos_theta_quadratic[i * N_ + n] - (Dtype)3.);
        Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)8. * sign_1[i * N_ + n] * cos_theta_cubic[i * N_ + n] -
        sign_2[i * N_ + n]);
        // NOTE(review): coeff_norm is zero if both coefficients vanish,
        // which would produce 0/0 below — confirm this cannot occur.
        Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
        coeff_w = coeff_w / coeff_norm;
        coeff_x = coeff_x / coeff_norm;
        // Blend margin gradient and plain gradient, mirroring the forward pass.
        bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
        (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
        bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
      }
    }
  }
}
// Gradient of the quadruple-margin forward pass w.r.t. the bottom data.
// One thread per bottom element (i, j); accumulates over all N_ classes.
// NOTE(review): 1/x_norm[i] is taken unconditionally (no near-zero guard as
// in the double-margin version) — confirm inputs cannot be (near-)zero.
template <typename Dtype>
__global__ void Margin_bottom_quadruple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda,
const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label,
const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4,
const Dtype* cos_theta, const Dtype* cos_theta_quadratic,
const Dtype* cos_theta_cubic, const Dtype* cos_theta_quartic, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int i = index / K_;  // sample row
    const int j = index % K_;  // feature column
    bottom_diff[index] = (Dtype)0.;
    const int label_value = static_cast<int>(label[i]);
    for (int n = 0; n < N_; n++) {
      if (label_value != n) {
        // Non-target class: plain inner-product gradient.
        bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j];
      } else {
        // Coefficients applied to the weight and input directions for the
        // margin term, renormalized to unit length before blending.
        Dtype coeff_w = sign_3[i * N_ + n] * ((Dtype)32. * cos_theta_cubic[i * N_ + n] - (Dtype)16. * cos_theta[i * N_ + n]);
        Dtype coeff_x = - (Dtype)1./ x_norm[i] * (sign_3[i * N_ + n] * ((Dtype)24. * cos_theta_quartic[i * N_ + n] -
        (Dtype)8. * cos_theta_quadratic[i * N_ + n] - 1) - sign_4[i * N_ + n]);
        Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x);
        coeff_w = coeff_w / coeff_norm;
        coeff_x = coeff_x / coeff_norm;
        // Blend margin gradient and plain gradient, mirroring the forward pass.
        bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] *
        (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]);
        bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j];
      }
    }
  }
}
// Forward pass: inner product x'w with an angular margin applied to each
// sample's target-class logit.  lambda is annealed per iteration as
// base * (1 + gamma * iter)^(-power), floored at lambda_min, and exported
// through top[1] for monitoring.
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
  iter_ += (Dtype)1.;
  Dtype base_ = this->layer_param_.margin_inner_product_param().base();
  Dtype gamma_ = this->layer_param_.margin_inner_product_param().gamma();
  Dtype power_ = this->layer_param_.margin_inner_product_param().power();
  Dtype lambda_min_ = this->layer_param_.margin_inner_product_param().lambda_min();
  // NOTE(review): powf evaluates in single precision even when Dtype is
  // double — confirm the precision loss is acceptable here.
  lambda_ = base_ * powf(((Dtype)1. + gamma_ * iter_), -power_);
  lambda_ = max(lambda_, lambda_min_);
  top[1]->mutable_cpu_data()[0] = lambda_;
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  /************************* normalize weight *************************/
  // One thread per weight row; rows are L2-normalized in place.
  int nthreads = N_;
  Weight_norm_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
  CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_,
  this->blobs_[0]->mutable_gpu_data());
  /************************* common variables *************************/
  // x_norm_ = |x|
  nthreads = M_;
  Compute_bottom_norm_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
  CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom_data,
  x_norm_.mutable_gpu_data());
  nthreads = M_ * N_;
  // cos_theta = x'w / |x|  (inner products first, then divided in place)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
  bottom_data, weight, (Dtype)0., cos_theta_.mutable_gpu_data());
  Compute_cos_theta_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
  CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, x_norm_.gpu_data(), cos_theta_.mutable_gpu_data());
  // sign_0 = sign(cos_theta)
  caffe_gpu_sign(M_ * N_, cos_theta_.gpu_data(), sign_0_.mutable_gpu_data());
  /************************* optional variables *************************/
  // Precompute the cos-powers and sign blobs required by the chosen margin.
  switch (type_) {
  case MarginInnerProductParameter_MarginType_SINGLE:
    break;
  case MarginInnerProductParameter_MarginType_DOUBLE:
    // cos_theta_quadratic
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
    break;
  case MarginInnerProductParameter_MarginType_TRIPLE:
    // cos_theta_quadratic && cos_theta_cubic
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
    // sign_1 = sign(abs(cos_theta) - 0.5)
    Compute_sign_1_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, cos_theta_.gpu_data(), sign_1_.mutable_gpu_data());
    caffe_gpu_sign(M_ * N_, sign_1_.gpu_data(), sign_1_.mutable_gpu_data());
    // sign_2 = sign_0 * (1 + sign_1) - 2
    Compute_sign_2_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(),
    sign_1_.gpu_data(), sign_2_.mutable_gpu_data());
    break;
  case MarginInnerProductParameter_MarginType_QUADRUPLE:
    // cos_theta_quadratic && cos_theta_cubic && cos_theta_quartic
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data());
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data());
    caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)4., cos_theta_quartic_.mutable_gpu_data());
    // sign_3 = sign_0 * sign(2 * cos_theta_quadratic_ - 1)
    Compute_sign_3_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(),
    sign_3_.mutable_gpu_data());
    caffe_gpu_sign(M_ * N_, sign_3_.gpu_data(), sign_3_.mutable_gpu_data());
    // sign_4 = 2 * sign_0 + sign_3 - 3
    Compute_sign_4_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(),
    sign_3_.gpu_data(), sign_4_.mutable_gpu_data());
    break;
  default:
    LOG(FATAL) << "Unknown margin type.";
  }
  /************************* Forward *************************/
  // Start from the plain inner product, then patch target-class entries.
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1.,
  bottom_data, weight, (Dtype)0., top_data);
  switch (type_) {
  case MarginInnerProductParameter_MarginType_SINGLE:
    break;
  case MarginInnerProductParameter_MarginType_DOUBLE:
    // caffe_gpu_memcpy(M_ * N_, cos_theta_.gpu_data(), top_data);
    Margin_double_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(),
    sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), top_data);
    break;
  case MarginInnerProductParameter_MarginType_TRIPLE:
    Margin_triple_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_1_.gpu_data(),
    sign_2_.gpu_data(), cos_theta_.gpu_data(),
    cos_theta_cubic_.gpu_data(), top_data);
    break;
  case MarginInnerProductParameter_MarginType_QUADRUPLE:
    Margin_quadruple_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
    CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_3_.gpu_data(),
    sign_4_.gpu_data(), cos_theta_quadratic_.gpu_data(),
    cos_theta_quartic_.gpu_data(), top_data);
    break;
  default:
    LOG(FATAL) << "Unknown margin type.";
  }
}
// Backward pass.  The weight gradient uses the plain (un-margined)
// inner-product formulation; the bottom gradient dispatches to the
// margin-specific kernels.
template <typename Dtype>
void MarginInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  if (this->param_propagate_down_[0]) {
    // Gradient with respect to weight
    // NOTE: beta = (Dtype)1., so this accumulates into the existing diff.
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
    top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff());
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // Gradient with respect to bottom data
    // One thread per bottom element (M_ x K_).
    int nthreads = M_ * K_;
    switch (type_) {
    case MarginInnerProductParameter_MarginType_SINGLE:
      // No margin: the gradient is a plain GEMM with the weights.
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
      top_diff, this->blobs_[0]->gpu_data(), (Dtype)0.,
      bottom[0]->mutable_gpu_diff());
      break;
    case MarginInnerProductParameter_MarginType_DOUBLE:
      Margin_bottom_double_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
      x_norm_.gpu_data(), sign_0_.gpu_data(),
      cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
      bottom_diff);
      break;
    case MarginInnerProductParameter_MarginType_TRIPLE:
      Margin_bottom_triple_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
      x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(),
      cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(),
      bottom_diff);
      break;
    case MarginInnerProductParameter_MarginType_QUADRUPLE:
      Margin_bottom_quadruple_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label,
      x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(),
      cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(),
      cos_theta_cubic_.gpu_data(), cos_theta_quartic_.gpu_data(),
      bottom_diff);
      break;
    default:
      LOG(FATAL) << "Unknown margin type.";
    }
  }
}
// Instantiates Forward_gpu/Backward_gpu for float and double.
INSTANTIATE_LAYER_GPU_FUNCS(MarginInnerProductLayer);
}  // namespace caffe
|
3341eaf28fc14c5f654e7b095df4556012e504e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
using namespace std;
// includes, kernels
#include "matrixmul_kernel.cuh"
#include "matrixmul.h"
#include "matrixmul_gold.cpp"
// include helper header
#include "tiledMatMult.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
void ExtractFromPadded(Matrix M, const Matrix& Mpadded);
bool CompareResults(float* A, float* B, int elements, float eps);
int ReadFile(Matrix* M, char* file_name);
bool ReadParams(int* params, int size, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
#define MAT_MAX_SIZE 2048 //Set lower to be quick for grading
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Driver: multiplies M*N on the GPU (via MatrixMulOnDevice), recomputes the
// product on the CPU (computeGold), and compares the two within 0.01.
// Usage: no args / wrong arg count -> fixed-size random matrices;
//        argv[1..3] = params file, M file, N file; argv[4] (optional) = output.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
// Fixed seed so the randomly generated matrices are reproducible run to run.
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
/*
This is the random way provided initially.
int dummy;
dummy = rand() % MAT_MAX_SIZE;
int Mh = (dummy==0? 1: dummy);
dummy = rand() % MAT_MAX_SIZE;
int Mw = (dummy==0? 1: dummy);
M = AllocateMatrix(Mh, Mw, 1);
dummy = rand() % MAT_MAX_SIZE;
int Nw = (dummy==0? 1: dummy);
N = AllocateMatrix(Mw, Nw, 1);
P = AllocateMatrix(Mh, Nw, 0);*/
//This is to have square matrices to answer question 6
// NOTE(review): `dummy` is a leftover from the commented-out random path
// above and is unused in this branch.
int dummy;
int Mh = MAT_MAX_SIZE;
int Mw = MAT_MAX_SIZE;
int Nw = MAT_MAX_SIZE;
M = AllocateMatrix(Mh, Mw, 1);
N = AllocateMatrix(Mw, Nw, 1);
P = AllocateMatrix(Mh, Nw, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
// NOTE(review): data_read is passed by value, so ReadParams cannot update
// it and the check below can never fire; the bool ReadParams returns is
// what actually reports success -- confirm and rewire.
ReadParams(params, data_read, argv[1]);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
printf("Start CPU computation\n");
//Start cpu timing here (GPU events are used so both sides share one clock)
hipEvent_t startCPU, stopCPU;
hipEventCreate(&startCPU);
hipEventCreate(&stopCPU);
hipEventRecord(startCPU, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
//Stop cpu timing here
hipEventRecord(stopCPU, 0);
hipEventSynchronize(stopCPU);
float cpuTime;
hipEventElapsedTime(&cpuTime, startCPU, stopCPU);
hipEventDestroy(startCPU);
hipEventDestroy(stopCPU);
//Output timing
printf("CPU time: %f ms. \n", cpuTime);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected soluion
bool res = CompareResults(reference.elements, P.elements, P.height*P.width, 0.01f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
printf("Dimension M[height,width]: %d %d\n", M.height, M.width);
printf("Dimension N[height,width]: %d %d\n", N.height, N.width);
// argc == 5: caller supplied an output file name for the product.
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
// NOTE(review): `reference` (and `params` in the file-input path) are
// never freed -- acceptable at process exit, but worth noting.
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Multiply on the device
////////////////////////////////////////////////////////////////////////////////
// Pads M, N and P up to multiples of BLOCK_SIZE, runs the tiled kernel on
// the padded copies, then extracts the unpadded result into Punpadded.
// "Inclusive" timing covers transfers + kernel; "exclusive" covers the
// kernel launch only. The event-call ordering below is load-bearing.
void MatrixMulOnDevice(const Matrix Munpadded, const Matrix Nunpadded, Matrix Punpadded)
{
// I'm going to take care of the padding here...
Matrix M = PaddedMatrix(Munpadded, BLOCK_SIZE, 1);
Matrix N = PaddedMatrix(Nunpadded, BLOCK_SIZE, 1);
Matrix P = PaddedMatrix(Punpadded, BLOCK_SIZE, 0);
//Start inclusive timing here
hipEvent_t startIn, stopIn;
hipEventCreate(&startIn);
hipEventCreate(&stopIn);
hipEventRecord(startIn, 0);
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, Punpadded); // NOTE(review): copies only the unpadded payload, so the padded tail of Pd is NOT cleared -- confirm the kernel writes every padded cell
//Start exclusive timing here
hipEvent_t startEx, stopEx;
hipEventCreate(&startEx);
hipEventCreate(&stopEx);
hipEventRecord(startEx, 0);
// Setup the execution configuration
// Padding guarantees these divisions are exact.
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(N.width/dimBlock.x,M.height/dimBlock.y);
// Launch the device computation threads
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md, Nd, Pd);
//Stop exclusive timing here
hipEventRecord(stopEx, 0);
hipEventSynchronize(stopEx);
float exTime;
hipEventElapsedTime(&exTime, startEx, stopEx);
hipEventDestroy(startEx);
hipEventDestroy(stopEx);
// Read P from the device and then extract the submatrix with the result
CopyFromDeviceMatrix(P, Pd);
ExtractFromPadded(Punpadded, P);
//Stop inclusive timing here
hipEventRecord(stopIn, 0);
hipEventSynchronize(stopIn);
float inTime;
hipEventElapsedTime(&inTime, startIn, stopIn);
hipEventDestroy(startIn);
hipEventDestroy(stopIn);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
// Free the helper padded matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
//Output timing
printf("Inclusive time: %f ms. \n", inTime);
printf("Exclusive time: %f ms. \n", exTime);
}
// Allocate a device matrix of same size as M.
// Copies M's dimensions; on allocation failure prints the error and exits.
Matrix AllocateDeviceMatrix(const Matrix M)
{
    hipError_t error;
    Matrix Mdevice = M;
    // size_t byte count: the original int expression could overflow for
    // large width*height products.
    size_t size = (size_t)M.width * M.height * sizeof(float);
    error = hipMalloc((void**)&Mdevice.elements, size);
    if (error != hipSuccess)
    {
        printf("hipMalloc returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    return Mdevice;
}
// Allocate a host matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization (values in [0, 3]).
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    // size_t element count avoids int overflow for large matrices.
    size_t size = (size_t)M.width * M.height;
    M.elements = NULL;
    // don't allocate memory on option 2
    if(init == 2)
        return M;
    M.elements = (float*) malloc(size*sizeof(float));
    for(size_t i = 0; i < size; i++)
    {
        // BUG FIX: the original computed rand()*3, which overflows int
        // (UB) whenever rand() > INT_MAX/3; scale in floating point.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Copy a host matrix to a device matrix.
// Note: Mdevice is passed by value, so the dimension assignments below only
// touch the local copy; the hipMemcpy is the effective operation.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
    // size_t byte count avoids int overflow for large matrices.
    size_t size = (size_t)Mhost.width * Mhost.height * sizeof(float);
    Mdevice.height = Mhost.height;
    Mdevice.width = Mhost.width;
    Mdevice.pitch = Mhost.pitch;
    hipMemcpy(Mdevice.elements, Mhost.elements, size,
              hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    // size_t byte count avoids int overflow for large matrices.
    size_t size = (size_t)Mdevice.width * Mdevice.height * sizeof(float);
    hipMemcpy(Mhost.elements, Mdevice.elements, size,
              hipMemcpyDeviceToHost);
}
// Release the device buffer backing M and null the pointer so a repeated
// free is harmless.
void FreeDeviceMatrix(Matrix* M)
{
    hipFree(M->elements);
    M->elements = NULL;
}
// Release the host buffer backing M and null the pointer so a repeated
// free is harmless.
void FreeMatrix(Matrix* M)
{
    free(M->elements);
    M->elements = NULL;
}
// Compare two host arrays element-wise; true iff every pair of elements
// differs by at most eps.
// BUG FIX: the loop index is now int, matching the signed `elements`
// parameter; the original unsigned index made a negative `elements` wrap
// to a huge bound and read out of range.
bool CompareResults(float* A, float* B, int elements, float eps)
{
    for(int i = 0; i < elements; i++){
        float error = fabs(A[i]-B[i]);
        if(error>eps){
            return false;
        }
    }
    return true;
}
// Read `size` integers from file_name into params.
// Returns true only when all `size` values were successfully read.
// BUG FIX: the original declared a second `i` inside the for loop, so the
// outer counter stayed 0 and the function reported failure for any size>0
// (and never detected short/missing files either).
bool ReadParams(int* params, int size, char* file_name){
    std::ifstream ifile(file_name);
    int i = 0;
    while(i < size && (ifile >> params[i])){
        i++;
    }
    return i == size;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read equals
// M.height * M.width, and 1 otherwise.
// BUG FIX: the original kept looping after a stream failure, so the
// counter always reached data_read and the function always returned 0.
int ReadFile(Matrix* M, char* file_name)
{
    unsigned int data_read = M->height*M->width;
    std::ifstream ifile(file_name);
    unsigned int i = 0;
    for(; i < data_read; i++){
        if(!(ifile >> M->elements[i])) break; // stop on EOF/parse error
    }
    ifile.close();
    return (i==data_read)? 0:1;
}
// Write the matrix to file_name as space-separated floats, row-major.
// (Works for any dimensions, not just 16x16.)
void WriteFile(Matrix M, char* file_name)
{
    std::ofstream ofile(file_name);
    const unsigned int count = M.width * M.height;
    for(unsigned int idx = 0; idx < count; ++idx){
        ofile << M.elements[idx] << " ";
    }
    ofile.close();
}
// Build a zero-filled copy of M whose height and width are rounded up to
// the next multiple of BLKSZ. `pitch` records the original width so the
// payload can later be located (see ExtractFromPadded). When copyEntries
// is non-zero the original rows are copied into the padded layout.
// Assumes M.pitch <= M.width.
Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries)
{
    Matrix Mpadded;
    Mpadded.height = ((M.height - 1)/BLKSZ + 1)*BLKSZ; // ceil to BLKSZ
    Mpadded.width = ((M.width - 1)/BLKSZ + 1)*BLKSZ;
    Mpadded.pitch = M.width;
    Mpadded.elements = (float*) calloc(Mpadded.width*Mpadded.height, sizeof(float));
    // copy entries of original matrix only if asked to
    if( copyEntries ) {
        for( int r=0; r<M.height; r++) {
            memcpy(&Mpadded.elements[r*Mpadded.width], &M.elements[r*M.width], M.width*sizeof(float));
        }
    }
    return Mpadded;
}
// Copy the M.height x M.width payload out of Mpadded back into M.
// Relies on Mpadded.pitch recording the original (unpadded) width;
// assumes M.pitch <= M.width. Exits on a dimension mismatch.
void ExtractFromPadded(Matrix M, const Matrix& Mpadded)
{
    if( Mpadded.pitch!=M.width ) {
        printf("Error extracting data from padded matrix: Number of rows %d, %d\n", Mpadded.pitch, M.width);
        exit(1);
    }
    if( Mpadded.height<M.height ) {
        printf("Error extracting data from padded matrix: Height too small%d, %d\n", Mpadded.height, M.height);
        exit(1);
    }
    // Row-by-row copy: source rows are Mpadded.width apart, targets M.width.
    for( int r=0; r<M.height; r++) {
        memcpy(&M.elements[r*M.width], &Mpadded.elements[r*Mpadded.width], M.width*sizeof(float));
    }
    return;
}
| 3341eaf28fc14c5f654e7b095df4556012e504e5.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
using namespace std;
// includes, kernels
#include "matrixmul_kernel.cuh"
#include "matrixmul.h"
#include "matrixmul_gold.cpp"
// include helper header
#include "tiledMatMult.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
void ExtractFromPadded(Matrix M, const Matrix& Mpadded);
bool CompareResults(float* A, float* B, int elements, float eps);
int ReadFile(Matrix* M, char* file_name);
bool ReadParams(int* params, int size, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
#define MAT_MAX_SIZE 2048 //Set lower to be quick for grading
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Driver: multiplies M*N on the GPU (via MatrixMulOnDevice), recomputes the
// product on the CPU (computeGold), and compares the two within 0.01.
// Usage: no args / wrong arg count -> fixed-size random matrices;
//        argv[1..3] = params file, M file, N file; argv[4] (optional) = output.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
// Fixed seed so the randomly generated matrices are reproducible run to run.
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
/*
This is the random way provided initially.
int dummy;
dummy = rand() % MAT_MAX_SIZE;
int Mh = (dummy==0? 1: dummy);
dummy = rand() % MAT_MAX_SIZE;
int Mw = (dummy==0? 1: dummy);
M = AllocateMatrix(Mh, Mw, 1);
dummy = rand() % MAT_MAX_SIZE;
int Nw = (dummy==0? 1: dummy);
N = AllocateMatrix(Mw, Nw, 1);
P = AllocateMatrix(Mh, Nw, 0);*/
//This is to have square matrices to answer question 6
// NOTE(review): `dummy` is a leftover from the commented-out random path
// above and is unused in this branch.
int dummy;
int Mh = MAT_MAX_SIZE;
int Mw = MAT_MAX_SIZE;
int Nw = MAT_MAX_SIZE;
M = AllocateMatrix(Mh, Mw, 1);
N = AllocateMatrix(Mw, Nw, 1);
P = AllocateMatrix(Mh, Nw, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
// NOTE(review): data_read is passed by value, so ReadParams cannot update
// it and the check below can never fire; the bool ReadParams returns is
// what actually reports success -- confirm and rewire.
ReadParams(params, data_read, argv[1]);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
printf("Start CPU computation\n");
//Start cpu timing here (CUDA events are used so both sides share one clock)
cudaEvent_t startCPU, stopCPU;
cudaEventCreate(&startCPU);
cudaEventCreate(&stopCPU);
cudaEventRecord(startCPU, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
//Stop cpu timing here
cudaEventRecord(stopCPU, 0);
cudaEventSynchronize(stopCPU);
float cpuTime;
cudaEventElapsedTime(&cpuTime, startCPU, stopCPU);
cudaEventDestroy(startCPU);
cudaEventDestroy(stopCPU);
//Output timing
printf("CPU time: %f ms. \n", cpuTime);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected soluion
bool res = CompareResults(reference.elements, P.elements, P.height*P.width, 0.01f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
printf("Dimension M[height,width]: %d %d\n", M.height, M.width);
printf("Dimension N[height,width]: %d %d\n", N.height, N.width);
// argc == 5: caller supplied an output file name for the product.
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
// NOTE(review): `reference` (and `params` in the file-input path) are
// never freed -- acceptable at process exit, but worth noting.
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Multiply on the device
////////////////////////////////////////////////////////////////////////////////
// Pads M, N and P up to multiples of BLOCK_SIZE, runs the tiled kernel on
// the padded copies, then extracts the unpadded result into Punpadded.
// "Inclusive" timing covers transfers + kernel; "exclusive" covers the
// kernel launch only. The event-call ordering below is load-bearing.
void MatrixMulOnDevice(const Matrix Munpadded, const Matrix Nunpadded, Matrix Punpadded)
{
// I'm going to take care of the padding here...
Matrix M = PaddedMatrix(Munpadded, BLOCK_SIZE, 1);
Matrix N = PaddedMatrix(Nunpadded, BLOCK_SIZE, 1);
Matrix P = PaddedMatrix(Punpadded, BLOCK_SIZE, 0);
//Start inclusive timing here
cudaEvent_t startIn, stopIn;
cudaEventCreate(&startIn);
cudaEventCreate(&stopIn);
cudaEventRecord(startIn, 0);
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, Punpadded); // NOTE(review): copies only the unpadded payload, so the padded tail of Pd is NOT cleared -- confirm the kernel writes every padded cell
//Start exclusive timing here
cudaEvent_t startEx, stopEx;
cudaEventCreate(&startEx);
cudaEventCreate(&stopEx);
cudaEventRecord(startEx, 0);
// Setup the execution configuration
// Padding guarantees these divisions are exact.
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid(N.width/dimBlock.x,M.height/dimBlock.y);
// Launch the device computation threads
MatrixMulKernel<<<dimGrid,dimBlock>>>(Md, Nd, Pd);
//Stop exclusive timing here
cudaEventRecord(stopEx, 0);
cudaEventSynchronize(stopEx);
float exTime;
cudaEventElapsedTime(&exTime, startEx, stopEx);
cudaEventDestroy(startEx);
cudaEventDestroy(stopEx);
// Read P from the device and then extract the submatrix with the result
CopyFromDeviceMatrix(P, Pd);
ExtractFromPadded(Punpadded, P);
//Stop inclusive timing here
cudaEventRecord(stopIn, 0);
cudaEventSynchronize(stopIn);
float inTime;
cudaEventElapsedTime(&inTime, startIn, stopIn);
cudaEventDestroy(startIn);
cudaEventDestroy(stopIn);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
// Free the helper padded matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
//Output timing
printf("Inclusive time: %f ms. \n", inTime);
printf("Exclusive time: %f ms. \n", exTime);
}
// Allocate a device matrix of same size as M.
// Copies M's dimensions; on allocation failure prints the error and exits.
Matrix AllocateDeviceMatrix(const Matrix M)
{
    cudaError_t error;
    Matrix Mdevice = M;
    // size_t byte count: the original int expression could overflow for
    // large width*height products.
    size_t size = (size_t)M.width * M.height * sizeof(float);
    error = cudaMalloc((void**)&Mdevice.elements, size);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    return Mdevice;
}
// Allocate a host matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization (values in [0, 3]).
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
    Matrix M;
    M.width = M.pitch = width;
    M.height = height;
    // size_t element count avoids int overflow for large matrices.
    size_t size = (size_t)M.width * M.height;
    M.elements = NULL;
    // don't allocate memory on option 2
    if(init == 2)
        return M;
    M.elements = (float*) malloc(size*sizeof(float));
    for(size_t i = 0; i < size; i++)
    {
        // BUG FIX: the original computed rand()*3, which overflows int
        // (UB) whenever rand() > INT_MAX/3; scale in floating point.
        M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
    }
    return M;
}
// Copy a host matrix to a device matrix.
// Note: Mdevice is passed by value, so the dimension assignments below only
// touch the local copy; the cudaMemcpy is the effective operation.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
    // size_t byte count avoids int overflow for large matrices.
    size_t size = (size_t)Mhost.width * Mhost.height * sizeof(float);
    Mdevice.height = Mhost.height;
    Mdevice.width = Mhost.width;
    Mdevice.pitch = Mhost.pitch;
    cudaMemcpy(Mdevice.elements, Mhost.elements, size,
               cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
    // size_t byte count avoids int overflow for large matrices.
    size_t size = (size_t)Mdevice.width * Mdevice.height * sizeof(float);
    cudaMemcpy(Mhost.elements, Mdevice.elements, size,
               cudaMemcpyDeviceToHost);
}
// Release the device buffer backing M and null the pointer so a repeated
// free is harmless.
void FreeDeviceMatrix(Matrix* M)
{
    cudaFree(M->elements);
    M->elements = NULL;
}
// Release the host buffer backing M and null the pointer so a repeated
// free is harmless.
void FreeMatrix(Matrix* M)
{
    free(M->elements);
    M->elements = NULL;
}
// Compare two host arrays element-wise; true iff every pair of elements
// differs by at most eps.
// BUG FIX: the loop index is now int, matching the signed `elements`
// parameter; the original unsigned index made a negative `elements` wrap
// to a huge bound and read out of range.
bool CompareResults(float* A, float* B, int elements, float eps)
{
    for(int i = 0; i < elements; i++){
        float error = fabs(A[i]-B[i]);
        if(error>eps){
            return false;
        }
    }
    return true;
}
// Read `size` integers from file_name into params.
// Returns true only when all `size` values were successfully read.
// BUG FIX: the original declared a second `i` inside the for loop, so the
// outer counter stayed 0 and the function reported failure for any size>0
// (and never detected short/missing files either).
bool ReadParams(int* params, int size, char* file_name){
    std::ifstream ifile(file_name);
    int i = 0;
    while(i < size && (ifile >> params[i])){
        i++;
    }
    return i == size;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read equals
// M.height * M.width, and 1 otherwise.
// BUG FIX: the original kept looping after a stream failure, so the
// counter always reached data_read and the function always returned 0.
int ReadFile(Matrix* M, char* file_name)
{
    unsigned int data_read = M->height*M->width;
    std::ifstream ifile(file_name);
    unsigned int i = 0;
    for(; i < data_read; i++){
        if(!(ifile >> M->elements[i])) break; // stop on EOF/parse error
    }
    ifile.close();
    return (i==data_read)? 0:1;
}
// Write the matrix to file_name as space-separated floats, row-major.
// (Works for any dimensions, not just 16x16.)
void WriteFile(Matrix M, char* file_name)
{
    std::ofstream ofile(file_name);
    const unsigned int count = M.width * M.height;
    for(unsigned int idx = 0; idx < count; ++idx){
        ofile << M.elements[idx] << " ";
    }
    ofile.close();
}
// Build a zero-filled copy of M whose height and width are rounded up to
// the next multiple of BLKSZ. `pitch` records the original width so the
// payload can later be located (see ExtractFromPadded). When copyEntries
// is non-zero the original rows are copied into the padded layout.
// Assumes M.pitch <= M.width.
Matrix PaddedMatrix(const Matrix& M, const int BLKSZ, int copyEntries)
{
    Matrix Mpadded;
    Mpadded.height = ((M.height - 1)/BLKSZ + 1)*BLKSZ; // ceil to BLKSZ
    Mpadded.width = ((M.width - 1)/BLKSZ + 1)*BLKSZ;
    Mpadded.pitch = M.width;
    Mpadded.elements = (float*) calloc(Mpadded.width*Mpadded.height, sizeof(float));
    // copy entries of original matrix only if asked to
    if( copyEntries ) {
        for( int r=0; r<M.height; r++) {
            memcpy(&Mpadded.elements[r*Mpadded.width], &M.elements[r*M.width], M.width*sizeof(float));
        }
    }
    return Mpadded;
}
// Copy the M.height x M.width payload out of Mpadded back into M.
// Relies on Mpadded.pitch recording the original (unpadded) width;
// assumes M.pitch <= M.width. Exits on a dimension mismatch.
void ExtractFromPadded(Matrix M, const Matrix& Mpadded)
{
    if( Mpadded.pitch!=M.width ) {
        printf("Error extracting data from padded matrix: Number of rows %d, %d\n", Mpadded.pitch, M.width);
        exit(1);
    }
    if( Mpadded.height<M.height ) {
        printf("Error extracting data from padded matrix: Height too small%d, %d\n", Mpadded.height, M.height);
        exit(1);
    }
    // Row-by-row copy: source rows are Mpadded.width apart, targets M.width.
    for( int r=0; r<M.height; r++) {
        memcpy(&M.elements[r*M.width], &Mpadded.elements[r*Mpadded.width], M.width*sizeof(float));
    }
    return;
}
|
b9aef039eb1a6e985ba8840b2cb09a16438701d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//MULTIPLICACIN DE MATRICES CON SHARED MEMORY
/* Informacin para tener en cuenta:
* A thread block will be divided into WarpsPerBlock = (ThreadsPerBlock + WarpSize - 1) / WarpSize
* Para leer ms : http://stackoverflow.com/questions/10460742/how-do-cuda-blocks-warps-threads-map-onto-cuda-cores
*/
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<cuda.h>
using namespace std;
#define TILE_WIDTH 32 //mximo?
// Tiled shared-memory matrix multiplication kernel: C = A * B with
// A[filA][colA], B[filB][colB], C[filA][colB]. Each block computes one
// TILE_WIDTH x TILE_WIDTH tile of C by staging matching tiles of A and B
// through shared memory.
// NOTE(review): the tile loop below runs while m < gridDim.x and
// n < gridDim.y, i.e. it assumes the launch grid has at least
// colA/TILE_WIDTH blocks along x (the host launches a fixed 32x32 grid)
// -- confirm before reusing with other sizes.
__global__
void MultiplicaMatricesCU(int* A,int filA,int colA,int* B,int filB,int colB,int* C){//filC=filA,colC=colB
// Shared staging tiles for the current slice of A and B.
// NOTE(review): the tiles are float while A/B/C and `suma` are int, so
// the accumulation truncates -- only exact because the inputs are integral.
__shared__ float A_s[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_s[TILE_WIDTH][TILE_WIDTH];
// Block/thread coordinates and grid extents.
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = gridDim.x;
int gy = gridDim.y;
// Output element of C owned by this thread.
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int suma = 0;// running dot product for C[row][col]
int n = 0, m = 0;
while(m < gx && n < gy){
/* Load one tile of A: m walks the tile index along the x axis (columns
 * of A), so this thread fetches A[row][m*TILE_WIDTH + tx], or 0 when
 * that index falls outside the matrix.
 */
if(( ( m * TILE_WIDTH ) + tx ) < colA && row < filA) // zero-pad out-of-range loads
A_s[ty][tx] = A[ (row * colA) + ( ( m * TILE_WIDTH ) + tx )];//(Row*colA + k), where k-> 0..filB (filB = colA)
else A_s[ty][tx] = 0;
/* Load one tile of B: n walks the tile index along the y axis (rows of
 * B), so this thread fetches B[n*TILE_WIDTH + ty][col], or 0 when that
 * index falls outside the matrix. m and n advance together.
 */
if(( n * TILE_WIDTH + ty) < filB && col < colB)
B_s[ty][tx] = B[( ( n * TILE_WIDTH + ty) * colB ) + col ];//(k*colB)+Col, where k-> 0..filB
else B_s[ty][tx] = 0;
m++; n++;
__syncthreads();// wait until the whole block has loaded both tiles
for (int k=0; k < TILE_WIDTH ; ++k) {
suma += A_s[ty][k] * B_s[k][tx];
}
__syncthreads();// keep the tiles alive until every thread consumed them
}
if(row < filA && col < colB)
C[ (row * colB) + col] = suma; //C[filA][colB]
}
// CPU reference multiply: Z[filX][colY] = X[filX][colX] * Y[filY][colY],
// all matrices stored row-major in flat int arrays. Assumes colX == filY.
// (__host__ dropped: it is the default for unannotated functions.)
void multiplicaMatrices(int* X,int filX,int colX,int* Y,int filY,int colY,int* Z){
    for (int row = 0; row < filX; ++row) {
        for (int col = 0; col < colY; ++col) {
            int acc = 0;
            for (int k = 0; k < filY; ++k) {
                acc += X[row * colX + k] * Y[k * colY + col];
            }
            Z[row * colY + col] = acc;
        }
    }
}
// Print the flat array A as a filas x columnas matrix, space separated,
// one row per line. (__host__ dropped: it is the default.)
void imprime(int* A,int filas, int columnas){
    for (int r = 0; r < filas; ++r) {
        for (int c = 0; c < columnas; ++c) {
            std::cout << A[r * columnas + c] << " ";
        }
        std::cout << std::endl;
    }
}
// Fill every entry of the filas x columnas array with 1 (deterministic
// test data). (__host__ dropped: it is the default.)
void inicializa(int *A,int filas, int columnas){
    const int total = filas * columnas;
    for (int idx = 0; idx < total; ++idx) {
        A[idx] = 1;
    }
}
// Element-wise equality of two filas x columnas integer matrices.
// (__host__ dropped: it is the default.)
bool compara(int *A, int *B, int filas, int columnas){
    const int total = filas * columnas;
    for (int idx = 0; idx < total; ++idx) {
        if (A[idx] != B[idx]) return false;
    }
    return true;
}
// Driver: multiply on CPU and GPU, time both with clock(), and compare.
int main(void){
clock_t startCPU,endCPU,startGPU,endGPU;
hipError_t error = hipSuccess;
int *A,*B,*C; //A[filA][colA],B[filB][colB],C[filA][colB]
int *d_A,*d_B,*d_C,*h_C;
//int filA=2048,colA=2048,filB=2048,colB=2048;
int filA=1,colA=1024,filB=1024,colB=1;
//-------------------------------CPU--------------------------------------------------------------------
A=(int*)malloc(filA*colA*sizeof(int));
B=(int*)malloc(filB*colB*sizeof(int));
C=(int*)malloc(filA*colB*sizeof(int));
inicializa(A,filA,colA);
inicializa(B,filB,colB);
if(colA==filB){// matrices must be conformable
startCPU = clock();
multiplicaMatrices(A,filA,colA,B,filB,colB,C);
endCPU = clock();
//imprime(C,filA,colB);
}else{
cout<<"Error, no se pueden multiplicar"<<endl;
return 0;
}
double time_CPU=((double)(endCPU-startCPU))/CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la CPU fue: "<<time_CPU<<endl;
//-------------------------------GPU--------------------------------------------------------------------
h_C=(int*)malloc(filA*colB*sizeof(int));
startGPU = clock();
error=hipMalloc((void**)&d_A,filA*colA*sizeof(int));
if(error != hipSuccess){
cout<<"Error reservando memoria para d_A"<<endl;
//return -1;
}
// BUG FIX: the results of the next two hipMalloc calls were previously
// discarded, so their checks re-tested the stale status of the d_A call.
error=hipMalloc((void**)&d_B,filB*colB*sizeof(int));
if(error != hipSuccess){
cout<<"Error reservando memoria para d_B"<<endl;
//return -1;
}
error=hipMalloc((void**)&d_C,filA*colB*sizeof(int));
if(error != hipSuccess){
cout<<"Error reservando memoria para d_C"<<endl;
//return -1;
}
hipMemcpy(d_A,A,filA*colA*sizeof(int),hipMemcpyHostToDevice);// destination d_A, source A
hipMemcpy(d_B,B,filB*colB*sizeof(int),hipMemcpyHostToDevice);
// Fixed 32x32 grid of 32x32 blocks; matches the tile walk inside the
// kernel, which iterates over gridDim rather than the needed tile count.
dim3 dimblock(32,32,1);
dim3 dimGrid(32,32,1);
//dim3 dimGrid(ceil((double)(colB/32)),ceil((double)(filA/32)),1);
hipLaunchKernelGGL(( MultiplicaMatricesCU), dim3(dimGrid),dim3(dimblock), 0, 0, d_A,filA,colA,d_B,filB,colB,d_C);
hipDeviceSynchronize();
hipMemcpy(h_C,d_C,filA*colB*sizeof(int),hipMemcpyDeviceToHost);
endGPU = clock();
//imprime(h_C,filA,colB);
double time_GPU=((double)(endGPU-startGPU))/CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la GPU fue: "<<time_GPU<<endl;
//-----------------------------------------------------------------------------------
cout<<"El tiempo de aceleramiento fue: "<<time_CPU/time_GPU<<endl;
if(compara(h_C, C, filA, colB)) cout << "Buen clculo" << endl;
else cout << "Mal clculo" << endl;
free(A);free(B);free(C);free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
| b9aef039eb1a6e985ba8840b2cb09a16438701d1.cu | //MULTIPLICACIÓN DE MATRICES CON SHARED MEMORY
/* Información para tener en cuenta:
* A thread block will be divided into WarpsPerBlock = (ThreadsPerBlock + WarpSize - 1) / WarpSize
* Para leer más : http://stackoverflow.com/questions/10460742/how-do-cuda-blocks-warps-threads-map-onto-cuda-cores
*/
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<cuda.h>
using namespace std;
#define TILE_WIDTH 32 //¿máximo?
// Tiled shared-memory matrix multiplication kernel: C = A * B with
// A[filA][colA], B[filB][colB], C[filA][colB]. Each block computes one
// TILE_WIDTH x TILE_WIDTH tile of C by staging matching tiles of A and B
// through shared memory.
// NOTE(review): the tile loop below runs while m < gridDim.x and
// n < gridDim.y, i.e. it assumes the launch grid has at least
// colA/TILE_WIDTH blocks along x (the host launches a fixed 32x32 grid)
// -- confirm before reusing with other sizes.
__global__
void MultiplicaMatricesCU(int* A,int filA,int colA,int* B,int filB,int colB,int* C){//filC=filA,colC=colB
// Shared staging tiles for the current slice of A and B.
// NOTE(review): the tiles are float while A/B/C and `suma` are int, so
// the accumulation truncates -- only exact because the inputs are integral.
__shared__ float A_s[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_s[TILE_WIDTH][TILE_WIDTH];
// Block/thread coordinates and grid extents.
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = gridDim.x;
int gy = gridDim.y;
// Output element of C owned by this thread.
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int suma = 0;// running dot product for C[row][col]
int n = 0, m = 0;
while(m < gx && n < gy){
/* Load one tile of A: m walks the tile index along the x axis (columns
 * of A), so this thread fetches A[row][m*TILE_WIDTH + tx], or 0 when
 * that index falls outside the matrix.
 */
if(( ( m * TILE_WIDTH ) + tx ) < colA && row < filA) // zero-pad out-of-range loads
A_s[ty][tx] = A[ (row * colA) + ( ( m * TILE_WIDTH ) + tx )];//(Row*colA + k), where k-> 0..filB (filB = colA)
else A_s[ty][tx] = 0;
/* Load one tile of B: n walks the tile index along the y axis (rows of
 * B), so this thread fetches B[n*TILE_WIDTH + ty][col], or 0 when that
 * index falls outside the matrix. m and n advance together.
 */
if(( n * TILE_WIDTH + ty) < filB && col < colB)
B_s[ty][tx] = B[( ( n * TILE_WIDTH + ty) * colB ) + col ];//(k*colB)+Col, where k-> 0..filB
else B_s[ty][tx] = 0;
m++; n++;
__syncthreads();// wait until the whole block has loaded both tiles
for (int k=0; k < TILE_WIDTH ; ++k) {
suma += A_s[ty][k] * B_s[k][tx];
}
__syncthreads();// keep the tiles alive until every thread consumed them
}
if(row < filA && col < colB)
C[ (row * colB) + col] = suma; //C[filA][colB]
}
// CPU reference multiply: Z[filX][colY] = X[filX][colX] * Y[filY][colY],
// all matrices stored row-major in flat int arrays. Assumes colX == filY.
// (__host__ dropped: it is the default for unannotated functions.)
void multiplicaMatrices(int* X,int filX,int colX,int* Y,int filY,int colY,int* Z){
    for (int row = 0; row < filX; ++row) {
        for (int col = 0; col < colY; ++col) {
            int acc = 0;
            for (int k = 0; k < filY; ++k) {
                acc += X[row * colX + k] * Y[k * colY + col];
            }
            Z[row * colY + col] = acc;
        }
    }
}
__host__
void imprime(int* A,int filas, int columnas){
    // Dump a flat array to stdout formatted as a filas x columnas matrix,
    // one row per line, values separated by a single space.
    for (int f = 0; f < filas; ++f) {
        for (int c = 0; c < columnas; ++c)
            cout << A[f * columnas + c] << " ";
        cout << endl;
    }
}
__host__
void inicializa(int *A,int filas, int columnas){
    // Fill every element of the filas x columnas array with 1.
    const int total = filas * columnas;
    for (int idx = 0; idx < total; ++idx)
        A[idx] = 1;
}
__host__
bool compara(int *A, int *B, int filas, int columnas){
    // Element-wise equality test of two filas x columnas matrices.
    const int total = filas * columnas;
    for (int idx = 0; idx < total; ++idx)
        if (A[idx] != B[idx])
            return false;
    return true;
}
int main(void){
    // Multiply two int matrices on the CPU and on the GPU, time both paths,
    // and verify the device result against the host reference.
    clock_t startCPU, endCPU, startGPU, endGPU;
    cudaError_t error = cudaSuccess;
    // Host matrices: A[filA][colA], B[filB][colB], C[filA][colB]
    int *A, *B, *C;
    // Device buffers plus the host copy of the GPU result
    int *d_A, *d_B, *d_C, *h_C;
    //int filA=2048,colA=2048,filB=2048,colB=2048;
    int filA = 1, colA = 1024, filB = 1024, colB = 1;
    //------------------------------- CPU -------------------------------
    A = (int*)malloc(filA * colA * sizeof(int));
    B = (int*)malloc(filB * colB * sizeof(int));
    C = (int*)malloc(filA * colB * sizeof(int));
    inicializa(A, filA, colA);
    inicializa(B, filB, colB);
    if (colA == filB) {  // matrices must be conformable
        startCPU = clock();
        multiplicaMatrices(A, filA, colA, B, filB, colB, C);
        endCPU = clock();
        //imprime(C,filA,colB);
    } else {
        cout << "Error, no se pueden multiplicar" << endl;
        free(A); free(B); free(C);  // BUG FIX: do not leak on the early exit
        return 0;
    }
    double time_CPU = ((double)(endCPU - startCPU)) / CLOCKS_PER_SEC;
    cout << "El tiempo transcurrido en la CPU fue: " << time_CPU << endl;
    //------------------------------- GPU -------------------------------
    h_C = (int*)malloc(filA * colB * sizeof(int));
    startGPU = clock();
    error = cudaMalloc((void**)&d_A, filA * colA * sizeof(int));
    if (error != cudaSuccess) {
        cout << "Error reservando memoria para d_A" << endl;
        //return -1;
    }
    // BUG FIX: the original discarded the return codes of the next two
    // cudaMalloc calls, so their checks re-tested the stale d_A status.
    error = cudaMalloc((void**)&d_B, filB * colB * sizeof(int));
    if (error != cudaSuccess) {
        cout << "Error reservando memoria para d_B" << endl;
        //return -1;
    }
    error = cudaMalloc((void**)&d_C, filA * colB * sizeof(int));
    if (error != cudaSuccess) {
        cout << "Error reservando memoria para d_C" << endl;
        //return -1;
    }
    cudaMemcpy(d_A, A, filA * colA * sizeof(int), cudaMemcpyHostToDevice);  // dest d_A, src A
    cudaMemcpy(d_B, B, filB * colB * sizeof(int), cudaMemcpyHostToDevice);
    // Launch configuration depends directly on the matrix dimensions: the
    // kernel iterates min(gridDim.x, gridDim.y) tiles, so the grid must
    // cover at least ceil(colA / TILE_WIDTH) tiles per axis.
    dim3 dimblock(32, 32, 1);
    dim3 dimGrid(32, 32, 1);
    //dim3 dimGrid(ceil((double)(colB/32)),ceil((double)(filA/32)),1);
    MultiplicaMatricesCU<<<dimGrid, dimblock>>>(d_A, filA, colA, d_B, filB, colB, d_C);
    // BUG FIX: kernel launch failures are otherwise silent.
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        cout << "Error lanzando el kernel" << endl;
    }
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, filA * colB * sizeof(int), cudaMemcpyDeviceToHost);
    endGPU = clock();
    //imprime(h_C,filA,colB);
    double time_GPU = ((double)(endGPU - startGPU)) / CLOCKS_PER_SEC;
    cout << "El tiempo transcurrido en la GPU fue: " << time_GPU << endl;
    //-----------------------------------------------------------------------------------
    cout << "El tiempo de aceleramiento fue: " << time_CPU / time_GPU << endl;
    if (compara(h_C, C, filA, colB)) cout << "Buen cálculo" << endl;
    else cout << "Mal cálculo" << endl;
    free(A); free(B); free(C); free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
9f75b7a23bb2b17990aaa825ab5ee90f7b357b0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 by Contributors
* \file sigmoid_cross_entropy.cu
* \brief
* \author Yuntao Chen
*/
#include "./sigmoid_cross_entropy-inl.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// Launch geometry shared by the kernels below.
constexpr int CAFFE_CUDA_NUM_THREADS = 512;
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;
// Blocks required to cover N elements at CAFFE_CUDA_NUM_THREADS threads per
// block, capped at CAFFE_MAXIMUM_NUM_BLOCKS; CUDA_1D_KERNEL_LOOP strides by
// the whole grid, so a capped grid still covers every element.
inline int CAFFE_GET_BLOCKS(const int N) {
  const int needed = (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
  return needed < CAFFE_MAXIMUM_NUM_BLOCKS ? needed : CAFFE_MAXIMUM_NUM_BLOCKS;
}
namespace mshadow {
namespace cuda {
// Elementwise sigmoid cross-entropy loss over n logits, in the overflow-safe
// form  -x*(t - 1{x>=0}) + log(1 + exp(x - 2x*1{x>=0})), algebraically equal
// to  -t*x + log(1 + exp(x)).  A target of -1 means "ignore": its loss is 0
// and counts[] is 0, so the caller's normalization excludes it.
template<typename T>
__global__ void SigmoidCrossEntropyLossKernel(
const int n,
const T* logits,
const T* targets,
T* losses,
T* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
losses[index] = 0.;
counts[index] = 0.;
} else {
losses[index] =
-1. * logits[index] * (targets[index] - (logits[index] >= 0)) +
logf(
1 +
expf(logits[index] - 2 * logits[index] * (logits[index] >= 0)));
counts[index] = 1.;
}
}
}
// Elementwise gradient of the sigmoid cross-entropy loss w.r.t. the logits:
// d_logits = sigmoid(logit) - target.  Ignored elements (target == -1) get a
// zero gradient and counts[] = 0 so the caller's normalization skips them.
template<typename T>
__global__ void SigmoidCrossEntropyLossGradientKernel(
const int n,
const T* logits,
const T* targets,
T* d_logits,
T* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
d_logits[index] = 0.;
counts[index] = 0.;
} else {
d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index];
counts[index] = 1.;
}
}
}
// Launch the loss kernel over the whole (batch, dim) tensor, then reduce on
// the GPU with mshadow expressions: out[b] = sum(loss[b,:]) / (#valid + 1e-5).
// The 1e-5 keeps the division finite when every element of a row is ignored.
// `scale` and `count_num` are intentionally unused (mx.metric.Loss performs
// the final averaging); they are kept for interface parity with Backward.
template<typename T>
inline void SigmoidCrossEntropyForward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &loss,
Tensor<gpu, 1, T> &loss_sum,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
Tensor<gpu, 1, T> &out,
T scale) {
using namespace mshadow::expr;
// NOTE(review): launched on the default stream — confirm this orders
// correctly with the stream mshadow uses for the reductions below.
hipLaunchKernelGGL(( SigmoidCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(data.shape_.Size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
data.shape_.Size(), data.dptr_, label.dptr_, loss.dptr_, count.dptr_);
loss_sum = sumall_except_dim<0>(loss);
count_sum = sumall_except_dim<0>(count);
count_sum += static_cast<T>(1e-5);
out = loss_sum / count_sum;
int count_num = (count.size(0) * count.size(1));
//out /= static_cast<T>(count_num);
// mx.metric.Loss will take care of this
// out *= scale;
}
// Launch the gradient kernel, then normalize each row's gradient by its
// valid-element count (+1e-5 to avoid division by zero) and apply `scale`.
// `count_num` is computed but intentionally unused (see the commented line).
template<typename T>
inline void SigmoidCrossEntropyBackward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &d_data,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
T scale) {
using namespace mshadow::expr;
hipLaunchKernelGGL(( SigmoidCrossEntropyLossGradientKernel), dim3(CAFFE_GET_BLOCKS(data.shape_.Size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
data.shape_.Size(), data.dptr_, label.dptr_, d_data.dptr_, count.dptr_);
count_sum = sumall_except_dim<0>(count);
count_sum += static_cast<T>(1e-5);
d_data /= broadcast<0>(count_sum, d_data.shape_);
int count_num = (count.size(0) * count.size(1));
//d_data /= static_cast<T>(count_num);
d_data *= scale;
}
} // namespace cuda
// mshadow-namespace shim: forwards straight to the cuda:: implementation so
// callers resolve the same signature for both cpu and gpu tensor types.
template<typename T>
inline void SigmoidCrossEntropyForward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &loss,
Tensor<gpu, 1, T> &loss_sum,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
Tensor<gpu, 1, T> &out,
T scale) {
cuda::SigmoidCrossEntropyForward(data, label, loss, loss_sum, count, count_sum, out, scale);
}
// mshadow-namespace shim: forwards straight to the cuda:: implementation.
template<typename T>
inline void SigmoidCrossEntropyBackward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &d_data,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
T scale) {
cuda::SigmoidCrossEntropyBackward(data, label, d_data, count, count_sum, scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory: instantiates SigmoidCrossEntropyOp for the runtime
// dtype via MSHADOW_REAL_TYPE_SWITCH (the real types the macro supports).
template<>
Operator *CreateOp<gpu>(SigmoidCrossEntropyParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new SigmoidCrossEntropyOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
| 9f75b7a23bb2b17990aaa825ab5ee90f7b357b0e.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 by Contributors
* \file sigmoid_cross_entropy.cu
* \brief
* \author Yuntao Chen
*/
#include "./sigmoid_cross_entropy-inl.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// Launch geometry shared by the kernels below.
constexpr int CAFFE_CUDA_NUM_THREADS = 512;
constexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;
// Blocks required to cover N elements at CAFFE_CUDA_NUM_THREADS threads per
// block, capped at CAFFE_MAXIMUM_NUM_BLOCKS; CUDA_1D_KERNEL_LOOP strides by
// the whole grid, so a capped grid still covers every element.
inline int CAFFE_GET_BLOCKS(const int N) {
  const int needed = (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
  return needed < CAFFE_MAXIMUM_NUM_BLOCKS ? needed : CAFFE_MAXIMUM_NUM_BLOCKS;
}
namespace mshadow {
namespace cuda {
// Elementwise sigmoid cross-entropy loss over n logits, in the overflow-safe
// form  -x*(t - 1{x>=0}) + log(1 + exp(x - 2x*1{x>=0})), algebraically equal
// to  -t*x + log(1 + exp(x)).  A target of -1 means "ignore": its loss is 0
// and counts[] is 0, so the caller's normalization excludes it.
template<typename T>
__global__ void SigmoidCrossEntropyLossKernel(
const int n,
const T* logits,
const T* targets,
T* losses,
T* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
losses[index] = 0.;
counts[index] = 0.;
} else {
losses[index] =
-1. * logits[index] * (targets[index] - (logits[index] >= 0)) +
logf(
1 +
expf(logits[index] - 2 * logits[index] * (logits[index] >= 0)));
counts[index] = 1.;
}
}
}
// Elementwise gradient of the sigmoid cross-entropy loss w.r.t. the logits:
// d_logits = sigmoid(logit) - target.  Ignored elements (target == -1) get a
// zero gradient and counts[] = 0 so the caller's normalization skips them.
template<typename T>
__global__ void SigmoidCrossEntropyLossGradientKernel(
const int n,
const T* logits,
const T* targets,
T* d_logits,
T* counts) {
CUDA_1D_KERNEL_LOOP(index, n) {
if (targets[index] == -1) {
d_logits[index] = 0.;
counts[index] = 0.;
} else {
d_logits[index] = 1. / (1. + expf(-logits[index])) - targets[index];
counts[index] = 1.;
}
}
}
// Launch the loss kernel over the whole (batch, dim) tensor, then reduce on
// the GPU with mshadow expressions: out[b] = sum(loss[b,:]) / (#valid + 1e-5).
// The 1e-5 keeps the division finite when every element of a row is ignored.
// `scale` and `count_num` are intentionally unused (mx.metric.Loss performs
// the final averaging); they are kept for interface parity with Backward.
template<typename T>
inline void SigmoidCrossEntropyForward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &loss,
Tensor<gpu, 1, T> &loss_sum,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
Tensor<gpu, 1, T> &out,
T scale) {
using namespace mshadow::expr;
// NOTE(review): launched on the default stream — confirm this orders
// correctly with the stream mshadow uses for the reductions below.
SigmoidCrossEntropyLossKernel<<<CAFFE_GET_BLOCKS(data.shape_.Size()), CAFFE_CUDA_NUM_THREADS, 0>>>(
data.shape_.Size(), data.dptr_, label.dptr_, loss.dptr_, count.dptr_);
loss_sum = sumall_except_dim<0>(loss);
count_sum = sumall_except_dim<0>(count);
count_sum += static_cast<T>(1e-5);
out = loss_sum / count_sum;
int count_num = (count.size(0) * count.size(1));
//out /= static_cast<T>(count_num);
// mx.metric.Loss will take care of this
// out *= scale;
}
// Launch the gradient kernel, then normalize each row's gradient by its
// valid-element count (+1e-5 to avoid division by zero) and apply `scale`.
// `count_num` is computed but intentionally unused (see the commented line).
template<typename T>
inline void SigmoidCrossEntropyBackward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &d_data,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
T scale) {
using namespace mshadow::expr;
SigmoidCrossEntropyLossGradientKernel<<<CAFFE_GET_BLOCKS(data.shape_.Size()), CAFFE_CUDA_NUM_THREADS, 0>>>(
data.shape_.Size(), data.dptr_, label.dptr_, d_data.dptr_, count.dptr_);
count_sum = sumall_except_dim<0>(count);
count_sum += static_cast<T>(1e-5);
d_data /= broadcast<0>(count_sum, d_data.shape_);
int count_num = (count.size(0) * count.size(1));
//d_data /= static_cast<T>(count_num);
d_data *= scale;
}
} // namespace cuda
// mshadow-namespace shim: forwards straight to the cuda:: implementation so
// callers resolve the same signature for both cpu and gpu tensor types.
template<typename T>
inline void SigmoidCrossEntropyForward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &loss,
Tensor<gpu, 1, T> &loss_sum,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
Tensor<gpu, 1, T> &out,
T scale) {
cuda::SigmoidCrossEntropyForward(data, label, loss, loss_sum, count, count_sum, out, scale);
}
// mshadow-namespace shim: forwards straight to the cuda:: implementation.
template<typename T>
inline void SigmoidCrossEntropyBackward(const Tensor<gpu, 2, T> &data,
const Tensor<gpu, 2, T> &label,
Tensor<gpu, 2, T> &d_data,
Tensor<gpu, 2, T> &count,
Tensor<gpu, 1, T> &count_sum,
T scale) {
cuda::SigmoidCrossEntropyBackward(data, label, d_data, count, count_sum, scale);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory: instantiates SigmoidCrossEntropyOp for the runtime
// dtype via MSHADOW_REAL_TYPE_SWITCH (the real types the macro supports).
template<>
Operator *CreateOp<gpu>(SigmoidCrossEntropyParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new SigmoidCrossEntropyOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
c922b6096428898a766997ca0dd6dfccebab5dac.hip | // !!! This is a file automatically generated by hipify!!!
#include "GpuIndirectDrawApp.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Fill an array of drawCount indirect-draw commands on the device. Each
// command is 5 consecutive uints written as {primCount, 1, 0, 0, i}; the
// final field is the per-draw index (presumably the command's base-instance
// slot — confirm against the consuming draw call). Uses a grid-stride loop
// so any launch configuration covers all commands.
__global__ void UpdateCommandKernel(unsigned int* ptr,unsigned int primCount, unsigned drawCount)
{
    unsigned step = blockDim.x * gridDim.x;
    // BUG FIX: the loop index was a signed int compared against the
    // unsigned drawCount (implicit conversion; breaks past INT_MAX).
    for (unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
         i < drawCount;
         i += step)
    {
        unsigned offset = 5 * i;
        ptr[offset]     = primCount; // primitive/element count
        ptr[offset + 1] = 1;         // a single instance per draw
        ptr[offset + 2] = 0;
        ptr[offset + 3] = 0;
        ptr[offset + 4] = i;         // per-draw index
    }
}
// Host-side launcher: fills `size` indirect-draw commands of `primCount`
// primitives each into devPtr (device memory). The grid is sized from the
// device's multiprocessor count, capped at 65535 blocks.
// NOTE(review): neither the attribute query nor the kernel launch result is
// checked for errors.
void UpdateDrawCommand(unsigned int* devPtr,unsigned int primCount, unsigned int size)
{
const size_t BLOCK_SIZE = 128;
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
size_t max_blocks = 65535;
size_t n_blocks = min( max_blocks, (size + (BLOCK_SIZE*numSMs)-1) / (BLOCK_SIZE*numSMs) );
hipLaunchKernelGGL(( UpdateCommandKernel), dim3(n_blocks*numSMs),dim3(BLOCK_SIZE), 0, 0, devPtr,primCount,size);
}
// Global application handle shared with the framework.
iglu::IGLUApp* app;
// Entry point: build the indirect-draw demo around the sample scene, hand it
// the GPU command-update callback, and enter the framework's main loop.
int main()
{
app = new OGL::GpuIndirectDrawApp("../../CommonSampleFiles/scenes/nature.txt");
OGL::GpuIndirectDrawApp* GIDApp = static_cast<OGL::GpuIndirectDrawApp*>(app);
GIDApp->SetUpdateDrawCommand(UpdateDrawCommand);
app->Run();
return 0;
}
| c922b6096428898a766997ca0dd6dfccebab5dac.cu | #include "GpuIndirectDrawApp.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Fill an array of drawCount indirect-draw commands on the device. Each
// command is 5 consecutive uints written as {primCount, 1, 0, 0, i}; the
// final field is the per-draw index (presumably the command's base-instance
// slot — confirm against the consuming draw call). Uses a grid-stride loop
// so any launch configuration covers all commands.
__global__ void UpdateCommandKernel(unsigned int* ptr,unsigned int primCount, unsigned drawCount)
{
    unsigned step = blockDim.x * gridDim.x;
    // BUG FIX: the loop index was a signed int compared against the
    // unsigned drawCount (implicit conversion; breaks past INT_MAX).
    for (unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
         i < drawCount;
         i += step)
    {
        unsigned offset = 5 * i;
        ptr[offset]     = primCount; // primitive/element count
        ptr[offset + 1] = 1;         // a single instance per draw
        ptr[offset + 2] = 0;
        ptr[offset + 3] = 0;
        ptr[offset + 4] = i;         // per-draw index
    }
}
// Host-side launcher: fills `size` indirect-draw commands of `primCount`
// primitives each into devPtr (device memory). The grid is sized from the
// device's multiprocessor count, capped at 65535 blocks.
// NOTE(review): neither the attribute query nor the kernel launch result is
// checked for errors.
void UpdateDrawCommand(unsigned int* devPtr,unsigned int primCount, unsigned int size)
{
const size_t BLOCK_SIZE = 128;
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
size_t max_blocks = 65535;
size_t n_blocks = min( max_blocks, (size + (BLOCK_SIZE*numSMs)-1) / (BLOCK_SIZE*numSMs) );
UpdateCommandKernel<<<n_blocks*numSMs,BLOCK_SIZE>>>(devPtr,primCount,size);
}
// Global application handle shared with the framework.
iglu::IGLUApp* app;
// Entry point: build the indirect-draw demo around the sample scene, hand it
// the GPU command-update callback, and enter the framework's main loop.
int main()
{
app = new OGL::GpuIndirectDrawApp("../../CommonSampleFiles/scenes/nature.txt");
OGL::GpuIndirectDrawApp* GIDApp = static_cast<OGL::GpuIndirectDrawApp*>(app);
GIDApp->SetUpdateDrawCommand(UpdateDrawCommand);
app->Run();
return 0;
}
|
6d81ef49bf4935b274793582be95beaba2ee53c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h>
#include "pso_cluster.h"
/*
 * Euclidean distance between two DATA_DIM-dimensional integer points.
 */
__host__ __device__
float devGetDistance(int *first, int *second)
{
    float acc = 0.0f;
    for (int d = 0; d < DATA_DIM; ++d)
    {
        int diff = first[d] - second[d];
        acc += diff * diff;
    }
    return sqrt(acc);
}
/*
 * Clustering fitness for one set of centroids: the mean over clusters of the
 * (per-point-averaged) distance from each assigned point to its centroid.
 * Lower is better.
 */
__host__ __device__
float devFitness(short* assignMat, int* datas, int* centroids, int data_size,
    int cluster_size)
{
    float acc = 0.0f;
    for (int c = 0; c < cluster_size; ++c)
    {
        float clusterDist = 0.0f;
        for (int p = 0; p < data_size; ++p)
        {
            if (assignMat[p] == c)
                clusterDist += devGetDistance(&datas[p * DATA_DIM],
                                              &centroids[c * DATA_DIM]);
        }
        acc += clusterDist / data_size;
    }
    return acc / cluster_size;
}
/*
 * Assign every data point to its nearest centroid (Euclidean distance);
 * ties keep the lowest centroid index.
 */
__host__ __device__
void devAssignDataToCentroid(short *assignMat, int *datas, int *centroids,
    int data_size, int cluster_size)
{
    for (int p = 0; p < data_size; ++p)
    {
        int bestIdx = 0;
        float bestDist = INF;
        for (int c = 0; c < cluster_size; ++c)
        {
            float dist = devGetDistance(&datas[p * DATA_DIM],
                                        &centroids[c * DATA_DIM]);
            if (dist < bestDist)
            {
                bestDist = dist;
                bestIdx = c;
            }
        }
        assignMat[p] = bestIdx;
    }
}
/*
 * Seed PSO state: each particle's centroids (position and pBest) are copied
 * from a randomly chosen data point, velocities start at zero, and gBest is
 * initialized from the first particle's pBest.
 */
void initialize(int *positions, int *velocities, int *pBests, int *gBest,
    const data* datas, int data_size, int particle_size,
    int cluster_size)
{
    const int total = particle_size * cluster_size * DATA_DIM;
    for (int base = 0; base < total; base += DATA_DIM)
    {
        int pick = round(getRandom(0, data_size - 1));
        for (int d = 0; d < DATA_DIM; ++d)
        {
            positions[base + d] = datas[pick].info[d];
            pBests[base + d] = datas[pick].info[d];
            velocities[base + d] = 0;
        }
    }
    for (int d = 0; d < cluster_size * DATA_DIM; ++d)
        gBest[d] = pBests[d];
}
/*
 * One thread per coordinate (particle x cluster x dimension): apply the
 * canonical PSO velocity update v = w*v + c1*rp*(pBest-x) + c2*rg*(gBest-x)
 * and advance the position. rp/rg are random factors drawn once per
 * iteration on the host and shared by all threads. posAssign, datas and
 * data_size are unused here; they are part of the shared launch signature.
 */
__global__ void kernelUpdateParticle(int *positions, int *velocities,
int *pBests, int *gBest, short *posAssign,
int* datas, float rp, float rg,
int data_size, int particle_size,
int cluster_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particle_size * cluster_size * DATA_DIM)
return;
// Update particle velocity and position (gBest is indexed modulo one
// particle's coordinate count, since it is shared by all particles).
velocities[i] = (int)lroundf(OMEGA * velocities[i]
+ c1 * rp * (pBests[i] - positions[i])
+ c2 * rg *
(gBest[i % (cluster_size * DATA_DIM)] - positions[i]));
positions[i] += velocities[i];
}
/*
 * One thread per particle: re-assign every data point to this particle's
 * nearest centroid, then adopt the new position as the particle's pBest if
 * it improves the fitness (lower devFitness is better).
 */
__global__ void kernelUpdatePBest(int *positions, int *pBests, short *posAssign,
short *pBestAssign, int* datas, int data_size,
int particle_size, int cluster_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int offsetParticle = i * cluster_size * DATA_DIM;
int offsetAssign = i * data_size;
if(i >= particle_size)
return;
devAssignDataToCentroid(&posAssign[offsetAssign], datas,
&positions[offsetParticle], data_size, cluster_size);
// Update pBest
if (devFitness(&posAssign[offsetAssign], datas, &positions[offsetParticle],
data_size, cluster_size)
< devFitness(&pBestAssign[offsetAssign], datas, &pBests[offsetParticle],
data_size, cluster_size))
{
// Update pBest position
for (int k = 0; k < cluster_size * DATA_DIM; k++)
pBests[offsetParticle + k] = positions[offsetParticle + k];
// Update pBest assignment matrix
for(int k = 0; k < data_size; k++)
pBestAssign[offsetAssign + k] = posAssign[offsetAssign + k];
}
}
/*
 * Host driver for GPU PSO clustering: allocates host/device state, runs
 * max_iter iterations (velocity/position update and pBest update on the
 * GPU, gBest reduction on the host), and returns the best assignment and
 * centroids found. Ownership of the returned gBestAssign and arrCentroids
 * arrays passes to the caller.
 * NOTE(review): none of the hipMalloc/hipMemcpy/launch results are checked.
 */
extern "C" GBest devicePsoClustering(data *datas, int *flatDatas, int data_size,
int particle_size, int cluster_size,
int max_iter)
{
// Initialize host memory
int *positions = new int[particle_size * cluster_size * DATA_DIM];
int *velocities = new int[particle_size * cluster_size * DATA_DIM];
int *pBests = new int[particle_size * cluster_size * DATA_DIM];
int *gBest = new int[cluster_size * DATA_DIM];
short *posAssign = new short[particle_size * data_size];
short *pBestAssign = new short[particle_size * data_size];
short *gBestAssign = new short[data_size];
// Initialize assignment matrix to cluster 0
for(int i = 0; i < particle_size * data_size; i++)
{
posAssign[i] = 0;
pBestAssign[i] = 0;
if(i < data_size)
gBestAssign[i] = 0;
}
initialize(positions, velocities, pBests, gBest, datas, data_size,
particle_size, cluster_size);
// Initialize device memory
int *devPositions, *devVelocities, *devPBests, *devGBest;
short *devPosAssign, *devPBestAssign;
int *devDatas;
size_t size = sizeof(int) * particle_size * cluster_size * DATA_DIM;
size_t assign_size = sizeof(short) * particle_size * data_size;
hipMalloc((void**)&devPositions, size);
hipMalloc((void**)&devVelocities, size);
hipMalloc((void**)&devPBests, size);
hipMalloc((void**)&devGBest, sizeof(int) * cluster_size * DATA_DIM);
hipMalloc((void**)&devPosAssign, assign_size);
hipMalloc((void**)&devPBestAssign, assign_size);
hipMalloc((void**)&devDatas, sizeof(int) * data_size * DATA_DIM);
// Copy data from host to device
hipMemcpy(devPositions, positions, size, hipMemcpyHostToDevice);
hipMemcpy(devVelocities, velocities, size, hipMemcpyHostToDevice);
hipMemcpy(devPBests, pBests, size, hipMemcpyHostToDevice);
hipMemcpy(devGBest, gBest, sizeof(int) * cluster_size * DATA_DIM,
hipMemcpyHostToDevice);
hipMemcpy(devPosAssign, posAssign, assign_size, hipMemcpyHostToDevice);
hipMemcpy(devPBestAssign, pBestAssign, assign_size, hipMemcpyHostToDevice);
hipMemcpy(devDatas, flatDatas, sizeof(int) * data_size * DATA_DIM,
hipMemcpyHostToDevice);
// Threads and blocks number (one extra block covers the remainder)
int threads = 32;
int blocksPart = (particle_size / threads) + 1;
int blocksFull = (particle_size * cluster_size * DATA_DIM / threads) + 1;
// Iteration: GPU updates particles, host reduces pBests into gBest
for (int iter = 0; iter < max_iter; iter++)
{
float rp = getRandomClamped();
float rg = getRandomClamped();
hipLaunchKernelGGL(( kernelUpdateParticle), dim3(blocksFull), dim3(threads), 0, 0,
devPositions, devVelocities, devPBests, devGBest, devPosAssign,
devDatas, rp, rg, data_size, particle_size, cluster_size);
hipLaunchKernelGGL(( kernelUpdatePBest), dim3(blocksPart), dim3(threads), 0, 0,
devPositions, devPBests, devPosAssign, devPBestAssign, devDatas,
data_size, particle_size, cluster_size);
// Compute gBest on host (the blocking memcpys also synchronize with
// the kernels above)
hipMemcpy(pBests, devPBests, size, hipMemcpyDeviceToHost);
hipMemcpy(pBestAssign, devPBestAssign, assign_size,
hipMemcpyDeviceToHost);
for(int i = 0; i < particle_size; i++)
{
// Get slice of array
int offsetParticle = i * cluster_size * DATA_DIM;
int offsetAssign = i * data_size;
// Compare pBest and gBest
if (devFitness(&pBestAssign[offsetAssign], flatDatas,
&pBests[offsetParticle], data_size, cluster_size)
< devFitness(gBestAssign, flatDatas, gBest, data_size,
cluster_size))
{
// Update gBest position
for (int k = 0; k < cluster_size * DATA_DIM; k++)
gBest[k] = pBests[offsetParticle + k];
// Update gBest assignment matrix
for(int k = 0; k < data_size; k++)
gBestAssign[k] = pBestAssign[offsetAssign + k];
}
}
hipMemcpy(devGBest, gBest, sizeof(int) * cluster_size * DATA_DIM,
hipMemcpyHostToDevice);
}
// Copy gBest from device to host (devGBest was last written from the
// host-side gBest above, so this is effectively a final sync)
hipMemcpy(gBest, devGBest, sizeof(int) * cluster_size * DATA_DIM,
hipMemcpyDeviceToHost);
// Cleanup (gBest and gBestAssign are intentionally NOT freed: they are
// handed to the caller via the returned GBest)
delete[] positions;
delete[] velocities;
delete[] pBests;
delete[] posAssign;
delete[] pBestAssign;
hipFree(devPositions);
hipFree(devVelocities);
hipFree(devPBests);
hipFree(devGBest);
hipFree(devPosAssign);
hipFree(devPBestAssign);
hipFree(devDatas);
GBest gBestReturn;
gBestReturn.gBestAssign = gBestAssign;
gBestReturn.arrCentroids = gBest;
return gBestReturn;
}
#include "pso_cluster.h"
/*
 * Euclidean distance between two DATA_DIM-dimensional integer points.
 */
__host__ __device__
float devGetDistance(int *first, int *second)
{
    float acc = 0.0f;
    for (int d = 0; d < DATA_DIM; ++d)
    {
        int diff = first[d] - second[d];
        acc += diff * diff;
    }
    return sqrt(acc);
}
/*
 * Clustering fitness for one set of centroids: the mean over clusters of the
 * (per-point-averaged) distance from each assigned point to its centroid.
 * Lower is better.
 */
__host__ __device__
float devFitness(short* assignMat, int* datas, int* centroids, int data_size,
    int cluster_size)
{
    float acc = 0.0f;
    for (int c = 0; c < cluster_size; ++c)
    {
        float clusterDist = 0.0f;
        for (int p = 0; p < data_size; ++p)
        {
            if (assignMat[p] == c)
                clusterDist += devGetDistance(&datas[p * DATA_DIM],
                                              &centroids[c * DATA_DIM]);
        }
        acc += clusterDist / data_size;
    }
    return acc / cluster_size;
}
/*
 * Assign every data point to its nearest centroid (Euclidean distance);
 * ties keep the lowest centroid index.
 */
__host__ __device__
void devAssignDataToCentroid(short *assignMat, int *datas, int *centroids,
    int data_size, int cluster_size)
{
    for (int p = 0; p < data_size; ++p)
    {
        int bestIdx = 0;
        float bestDist = INF;
        for (int c = 0; c < cluster_size; ++c)
        {
            float dist = devGetDistance(&datas[p * DATA_DIM],
                                        &centroids[c * DATA_DIM]);
            if (dist < bestDist)
            {
                bestDist = dist;
                bestIdx = c;
            }
        }
        assignMat[p] = bestIdx;
    }
}
/*
 * Seed PSO state: each particle's centroids (position and pBest) are copied
 * from a randomly chosen data point, velocities start at zero, and gBest is
 * initialized from the first particle's pBest.
 */
void initialize(int *positions, int *velocities, int *pBests, int *gBest,
    const data* datas, int data_size, int particle_size,
    int cluster_size)
{
    const int total = particle_size * cluster_size * DATA_DIM;
    for (int base = 0; base < total; base += DATA_DIM)
    {
        int pick = round(getRandom(0, data_size - 1));
        for (int d = 0; d < DATA_DIM; ++d)
        {
            positions[base + d] = datas[pick].info[d];
            pBests[base + d] = datas[pick].info[d];
            velocities[base + d] = 0;
        }
    }
    for (int d = 0; d < cluster_size * DATA_DIM; ++d)
        gBest[d] = pBests[d];
}
/*
 * One thread per coordinate (particle x cluster x dimension): apply the
 * canonical PSO velocity update v = w*v + c1*rp*(pBest-x) + c2*rg*(gBest-x)
 * and advance the position. rp/rg are random factors drawn once per
 * iteration on the host and shared by all threads. posAssign, datas and
 * data_size are unused here; they are part of the shared launch signature.
 */
__global__ void kernelUpdateParticle(int *positions, int *velocities,
int *pBests, int *gBest, short *posAssign,
int* datas, float rp, float rg,
int data_size, int particle_size,
int cluster_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particle_size * cluster_size * DATA_DIM)
return;
// Update particle velocity and position (gBest is indexed modulo one
// particle's coordinate count, since it is shared by all particles).
velocities[i] = (int)lroundf(OMEGA * velocities[i]
+ c1 * rp * (pBests[i] - positions[i])
+ c2 * rg *
(gBest[i % (cluster_size * DATA_DIM)] - positions[i]));
positions[i] += velocities[i];
}
/*
 * One thread per particle: re-assign every data point to this particle's
 * nearest centroid, then adopt the new position as the particle's pBest if
 * it improves the fitness (lower devFitness is better).
 */
__global__ void kernelUpdatePBest(int *positions, int *pBests, short *posAssign,
short *pBestAssign, int* datas, int data_size,
int particle_size, int cluster_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int offsetParticle = i * cluster_size * DATA_DIM;
int offsetAssign = i * data_size;
if(i >= particle_size)
return;
devAssignDataToCentroid(&posAssign[offsetAssign], datas,
&positions[offsetParticle], data_size, cluster_size);
// Update pBest
if (devFitness(&posAssign[offsetAssign], datas, &positions[offsetParticle],
data_size, cluster_size)
< devFitness(&pBestAssign[offsetAssign], datas, &pBests[offsetParticle],
data_size, cluster_size))
{
// Update pBest position
for (int k = 0; k < cluster_size * DATA_DIM; k++)
pBests[offsetParticle + k] = positions[offsetParticle + k];
// Update pBest assignment matrix
for(int k = 0; k < data_size; k++)
pBestAssign[offsetAssign + k] = posAssign[offsetAssign + k];
}
}
/*
 * Host driver for GPU PSO clustering: allocates host/device state, runs
 * max_iter iterations (velocity/position update and pBest update on the
 * GPU, gBest reduction on the host), and returns the best assignment and
 * centroids found. Ownership of the returned gBestAssign and arrCentroids
 * arrays passes to the caller.
 * NOTE(review): none of the cudaMalloc/cudaMemcpy/launch results are checked.
 */
extern "C" GBest devicePsoClustering(data *datas, int *flatDatas, int data_size,
int particle_size, int cluster_size,
int max_iter)
{
// Initialize host memory
int *positions = new int[particle_size * cluster_size * DATA_DIM];
int *velocities = new int[particle_size * cluster_size * DATA_DIM];
int *pBests = new int[particle_size * cluster_size * DATA_DIM];
int *gBest = new int[cluster_size * DATA_DIM];
short *posAssign = new short[particle_size * data_size];
short *pBestAssign = new short[particle_size * data_size];
short *gBestAssign = new short[data_size];
// Initialize assignment matrix to cluster 0
for(int i = 0; i < particle_size * data_size; i++)
{
posAssign[i] = 0;
pBestAssign[i] = 0;
if(i < data_size)
gBestAssign[i] = 0;
}
initialize(positions, velocities, pBests, gBest, datas, data_size,
particle_size, cluster_size);
// Initialize device memory
int *devPositions, *devVelocities, *devPBests, *devGBest;
short *devPosAssign, *devPBestAssign;
int *devDatas;
size_t size = sizeof(int) * particle_size * cluster_size * DATA_DIM;
size_t assign_size = sizeof(short) * particle_size * data_size;
cudaMalloc((void**)&devPositions, size);
cudaMalloc((void**)&devVelocities, size);
cudaMalloc((void**)&devPBests, size);
cudaMalloc((void**)&devGBest, sizeof(int) * cluster_size * DATA_DIM);
cudaMalloc((void**)&devPosAssign, assign_size);
cudaMalloc((void**)&devPBestAssign, assign_size);
cudaMalloc((void**)&devDatas, sizeof(int) * data_size * DATA_DIM);
// Copy data from host to device
cudaMemcpy(devPositions, positions, size, cudaMemcpyHostToDevice);
cudaMemcpy(devVelocities, velocities, size, cudaMemcpyHostToDevice);
cudaMemcpy(devPBests, pBests, size, cudaMemcpyHostToDevice);
cudaMemcpy(devGBest, gBest, sizeof(int) * cluster_size * DATA_DIM,
cudaMemcpyHostToDevice);
cudaMemcpy(devPosAssign, posAssign, assign_size, cudaMemcpyHostToDevice);
cudaMemcpy(devPBestAssign, pBestAssign, assign_size, cudaMemcpyHostToDevice);
cudaMemcpy(devDatas, flatDatas, sizeof(int) * data_size * DATA_DIM,
cudaMemcpyHostToDevice);
// Threads and blocks number (one extra block covers the remainder)
int threads = 32;
int blocksPart = (particle_size / threads) + 1;
int blocksFull = (particle_size * cluster_size * DATA_DIM / threads) + 1;
// Iteration: GPU updates particles, host reduces pBests into gBest
for (int iter = 0; iter < max_iter; iter++)
{
float rp = getRandomClamped();
float rg = getRandomClamped();
kernelUpdateParticle<<<blocksFull, threads>>>
(devPositions, devVelocities, devPBests, devGBest, devPosAssign,
devDatas, rp, rg, data_size, particle_size, cluster_size);
kernelUpdatePBest<<<blocksPart, threads>>>
(devPositions, devPBests, devPosAssign, devPBestAssign, devDatas,
data_size, particle_size, cluster_size);
// Compute gBest on host (the blocking memcpys also synchronize with
// the kernels above)
cudaMemcpy(pBests, devPBests, size, cudaMemcpyDeviceToHost);
cudaMemcpy(pBestAssign, devPBestAssign, assign_size,
cudaMemcpyDeviceToHost);
for(int i = 0; i < particle_size; i++)
{
// Get slice of array
int offsetParticle = i * cluster_size * DATA_DIM;
int offsetAssign = i * data_size;
// Compare pBest and gBest
if (devFitness(&pBestAssign[offsetAssign], flatDatas,
&pBests[offsetParticle], data_size, cluster_size)
< devFitness(gBestAssign, flatDatas, gBest, data_size,
cluster_size))
{
// Update gBest position
for (int k = 0; k < cluster_size * DATA_DIM; k++)
gBest[k] = pBests[offsetParticle + k];
// Update gBest assignment matrix
for(int k = 0; k < data_size; k++)
gBestAssign[k] = pBestAssign[offsetAssign + k];
}
}
cudaMemcpy(devGBest, gBest, sizeof(int) * cluster_size * DATA_DIM,
cudaMemcpyHostToDevice);
}
// Copy gBest from device to host (devGBest was last written from the
// host-side gBest above, so this is effectively a final sync)
cudaMemcpy(gBest, devGBest, sizeof(int) * cluster_size * DATA_DIM,
cudaMemcpyDeviceToHost);
// Cleanup (gBest and gBestAssign are intentionally NOT freed: they are
// handed to the caller via the returned GBest)
delete[] positions;
delete[] velocities;
delete[] pBests;
delete[] posAssign;
delete[] pBestAssign;
cudaFree(devPositions);
cudaFree(devVelocities);
cudaFree(devPBests);
cudaFree(devGBest);
cudaFree(devPosAssign);
cudaFree(devPBestAssign);
cudaFree(devDatas);
GBest gBestReturn;
gBestReturn.gBestAssign = gBestAssign;
gBestReturn.arrCentroids = gBest;
return gBestReturn;
}
ffb874491ee31c46bc081fd6ed28ede359cd4d2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
using namespace std;
//#define BLOCKS_NUM 5
//#define BLOCK_THREADS 128
//#define ITEMS_PER_THREAD 4
//#define CEILING_DIVIDE(X, Y) (1 + (((X) - 1) / (Y)))
#define CHECK(res) if(res!=hipSuccess){printf("CHECK ERROR!\n");exit(-1);}
__device__
int base2int(char base) {
    // Map a nucleotide to its 2-bit encoding: A=0, C=1, G=2, T=3; any other
    // character (including the 'S' terminator) maps to -1.
    if (base == 'A') return 0;
    if (base == 'C') return 1;
    if (base == 'G') return 2;
    if (base == 'T') return 3;
    return -1;
}
// 64-bit SpookyHash of a packed k-mer stored in (k/32 + 1) uint64 words
// (2 bits per base); b seeds the hash.
__device__
uint64 getHashValue (uint64 *x, uint64 b, int k) {
return SpookyHash_d::Hash64(x, (k / 32 + 1) * 8, b);
}
/*
 * Compute the hash of every k-mer this thread covers: slide a 2-bit-packed
 * window of length k over dna_d starting at thread_offset and write
 * ITEMS_PER_THREAD hash values into thread_dataR. Once a sequence
 * terminator ('S') is seen, every remaining slot is filled with UINT64_MAX
 * so those entries sort last. Uses per-thread device-side new/delete for
 * the packed window buffer.
 * NOTE(review): in the k >= 32 branch the first packing loop indexes
 * cur_seq[dna_index / 32] with the absolute DNA position rather than one
 * relative to thread_offset — this looks wrong for thread_offset >= 32;
 * confirm against the reference implementation.
 */
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockGetHashValues (const int k, char *dna_d, int thread_offset, uint64 *thread_dataR, int hash_b) {
int list_index = 0;
int dna_index = thread_offset;
bool isEnd = 0;
uint64 *cur_seq = new uint64[k / 32 + 1]; // current sub-sequence
for (int i = 0; i < k / 32 + 1; ++i)
cur_seq[i] = 0;
if (k < 32) {
// Prime the first k-1 bases of the window (fits in one 64-bit word).
for (; dna_index < thread_offset + k - 1; ++dna_index) {
if (dna_d[dna_index] == 'S')
isEnd = 1;
if (base2int(dna_d[dna_index]) != -1)
cur_seq[0] = (cur_seq[0] << 2) % ((uint64) 1 << (2 * k)) + base2int(dna_d[dna_index]);
}
// Slide: append one base, emit one hash per item.
for (; dna_index < thread_offset + ITEMS_PER_THREAD + k - 1; ++dna_index) {
if (dna_d[dna_index] == 'S')
isEnd = 1;
if (isEnd)
thread_dataR[list_index++] = UINT64_MAX;
else {
if (base2int(dna_d[dna_index]) != -1)
cur_seq[0] = (cur_seq[0] << 2) % ((uint64) 1 << (2 * k)) + base2int(dna_d[dna_index]);
thread_dataR[list_index++] = getHashValue(cur_seq, hash_b, k);
}
}
} else {
// Multi-word window: pack the first k bases across the uint64 array.
for (; dna_index < thread_offset + k; ++dna_index) {
if (dna_d[dna_index] == 'S')
isEnd = 1;
if (base2int(dna_d[dna_index]) != -1)
cur_seq[dna_index / 32] =
(cur_seq[dna_index / 32] << 2) % UINT64_MAX + base2int(dna_d[dna_index]);
}
if (isEnd)
thread_dataR[list_index++] = UINT64_MAX;
else
thread_dataR[list_index++] = getHashValue(cur_seq, hash_b, k);
// Slide: shift the whole multi-word window left by 2 bits, carrying
// the top bits of each word into the previous one, then append a base.
for (; dna_index < thread_offset + ITEMS_PER_THREAD + k - 1; ++dna_index) {
for (int j = 0; j < k / 32 - 1; ++j) {
cur_seq[j] = (cur_seq[j] << 2) + (cur_seq[j + 1] >> 62);
}
cur_seq[k / 32 - 1] = (cur_seq[k / 32 - 1] << 2) + (cur_seq[k / 32] >> ((k % 32) * 2 - 2));
if (dna_d[dna_index] == 'S')
isEnd = 1;
if (base2int(dna_d[dna_index]) != -1)
cur_seq[k / 32] = (cur_seq[k / 32] << 2) % ((uint64) 1 << (2 * (k % 32))) +
base2int(dna_d[dna_index]);
if (isEnd)
thread_dataR[list_index++] = UINT64_MAX;
else
thread_dataR[list_index++] = getHashValue(cur_seq, hash_b, k);
}
}
delete [] cur_seq;
}
/*
* Remove duplicate values from sorted hash value list.
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockRemoveDupe (const int m, int *thread_dataS, int *thread_dataD,
                      uint64 *thread_dataR, uint64 *sketch) {
    // Compact this thread's first-occurrence values into the shared sketch.
    // thread_dataS[0] is the exclusive-scan offset of this thread's first
    // flagged item; only the first m distinct values block-wide are kept.
    int write_pos = thread_dataS[0];
    for (int item = 0; item < ITEMS_PER_THREAD; ++item) {
        bool is_head = (thread_dataD[item] == 1);
        if (is_head && write_pos < m) {
            sketch[write_pos] = thread_dataR[item];
            ++write_pos;
        }
    }
}
/*
* Get sketch of each block back to input_d
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockGetBack (const int m, int threadID, uint64 *thread_dataR, uint64 *sketch) {
    // Copy this thread's slice of the shared block sketch back into its
    // register array; slots at or beyond m are left untouched.
    int base = threadID * ITEMS_PER_THREAD;
    for (int item = 0; item < ITEMS_PER_THREAD; ++item) {
        int src = base + item;
        if (src < m)
            thread_dataR[item] = sketch[src];
    }
}
/*
* Core Function
* Firstly, get hash value list from DNA sequence.
* For example, if dna_d is [A,C,C,G,T,A,T,G,C,T,G,A,...].
* Then input_d should be [6,5,6,1,0,2,4,2,1,2,6,3,...].
* Secondly, block-sort the hash value list.
* For example, if input_d is [6,5,6,1,0,2,4,2,1,2,6,3,...].
* Then output is still input_d: [0,1,1,2,2,2,3,4,5,6,6,6,...].
* Thirdly, mark the non-duplicate values.
* For example, if input_d is [0,1,1,2,2,2,3,4,5,6,6,6,...].
* Then dupe_d should be [1,1,0,1,0,0,1,1,1,1,0,0,...].
* Then, scan dupe_d to find the offset of values in output.
* For example, if dupe_d is [1,1,0,1,0,0,1,1,1,1,0,0,...].
* Then scan_d should be [0,1,2,2,3,3,3,4,5,6,7,7,...].
* Finally, get sketches of each block.
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__
void getBlockSketch (const int k, const int m, char *dna_d, uint64 *input_d,
                     int numElem_dna, int numElem_list, uint64 hash_b) {
    // Per-block pipeline: hash every k-mer window, sort the hashes, flag
    // first occurrences, scan the flags into compaction offsets, compact the
    // m smallest distinct hashes into shared memory, and write the block's
    // slice of input_d back.
    //
    // All collective typedefs now use the hipcub:: wrappers consistently;
    // the previous mix of cub:: and hipcub:: only compiled on CUDA back
    // ends, where hipcub is a thin alias for cub.
    typedef hipcub::BlockStore<uint64, BLOCK_THREADS, ITEMS_PER_THREAD, hipcub::BLOCK_STORE_WARP_TRANSPOSE> BlockStoreR;
    typedef hipcub::BlockRadixSort <uint64, BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSort;
    typedef hipcub::BlockDiscontinuity<uint64, BLOCK_THREADS> BlockDiscontinuity;
    typedef hipcub::BlockScan<int, BLOCK_THREADS> BlockScan;
    // The collectives run in distinct phases separated by __syncthreads(),
    // so their temporary storage can legally share one union.
    __shared__ union {
        typename BlockStoreR::TempStorage store;
        typename BlockRadixSort::TempStorage sort;
        typename BlockDiscontinuity::TempStorage dupe;
        typename BlockScan::TempStorage scan;
    } temp_storage;
    uint64 thread_dataR[ITEMS_PER_THREAD];  // hash values / sketch slice
    int thread_dataD[ITEMS_PER_THREAD];     // 1 = first occurrence of a value
    int thread_dataS[ITEMS_PER_THREAD];     // exclusive scan of the flags
    // Block sketch, padded with UINT64_MAX for slots beyond the kept values.
    __shared__ uint64 sketch[BLOCK_THREADS * ITEMS_PER_THREAD];
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
        sketch[threadIdx.x * ITEMS_PER_THREAD + i] = UINT64_MAX;
    __syncthreads();
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    int thread_offset = (BLOCK_THREADS * blockIdx.x + threadIdx.x) * ITEMS_PER_THREAD;
    int threadID = threadIdx.x;
    BlockGetHashValues<BLOCK_THREADS, ITEMS_PER_THREAD>(k, dna_d, thread_offset, thread_dataR, hash_b);
    __syncthreads();
    BlockRadixSort(temp_storage.sort).Sort(thread_dataR);
    __syncthreads();
    BlockDiscontinuity(temp_storage.dupe).FlagHeads(thread_dataD, thread_dataR, hipcub::Inequality());
    __syncthreads();
    BlockScan(temp_storage.scan).ExclusiveSum(thread_dataD, thread_dataS);
    __syncthreads();
    BlockRemoveDupe<BLOCK_THREADS, ITEMS_PER_THREAD>(m, thread_dataS, thread_dataD, thread_dataR, sketch);
    __syncthreads();
    BlockGetBack<BLOCK_THREADS, ITEMS_PER_THREAD>(m, threadID, thread_dataR, sketch);
    __syncthreads();
    // Entries past index m still hold sorted hashes; the merge kernel pads
    // partner reads with UINT64_MAX beyond m, so they are ignored.
    BlockStoreR(temp_storage.store).Store(input_d + block_offset, thread_dataR);
}
/*
* Get ranks and dupe-marks of list A
* */
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockGetRank(int m, uint64 *thread_data64, int *thread_dataR, int *thread_dataD, uint64 *shared_B) {
    // For each of this thread's keys, binary-search shared_B (ascending,
    // m valid entries, UINT64_MAX padding beyond) for the first element
    // >= key. That position is added to thread_dataR[i] as the key's rank
    // contribution from the partner list; thread_dataD[i] is set to 1 when
    // the key also occurs among the valid entries (a cross-list duplicate).
    for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
        uint64 key = thread_data64[i];
        int left = 0;
        int right = m - 1;
        int median;
        int result = m;
        while (left <= right) {
            median = (left + right) / 2;
            if (shared_B[median] >= key) {
                result = median;
                right = median - 1;
            }
            if (shared_B[median] < key) {
                left = median + 1;
            }
        }
        thread_dataD[i] = 0;
        // Guard the equality probe: when key exceeds every valid element,
        // result == m and the unguarded shared_B[m] access read past the
        // valid range (out of bounds when m fills the whole tile). Items
        // affected by the guard end up with rank >= m and are dropped by the
        // caller either way.
        if (result < m && shared_B[result] == key) {
            thread_dataD[i] = 1;
        }
        thread_dataR[i] += result;
    }
}
/*
* Merge between blocks. (Binary-merge)
* Calculate offsets of each value and store them to rank_d.
* Then, write back to input_d.
* */
template<int BLOCKS_NUM, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__
void getAllSketch (const int m, uint64 *input_d, int numElem_list) {
    // Tree-merge the per-block sketches stored in input_d. In the round with
    // step "offset", block pairs (A, A +/- offset) merge their sorted,
    // deduplicated m-value lists and the m smallest distinct values end up in
    // the lower block's region; after log2(BLOCKS_NUM) rounds block 0 holds
    // the global sketch.
    //
    // NOTE(review): rounds communicate through global memory but only
    // __syncthreads() (block-local) is used between them — there is no
    // grid-wide barrier, so a block may read its partner's region before the
    // partner finished writing it in the previous round. A cooperative launch
    // with grid sync, or one kernel launch per round, would be needed to make
    // this race-free; confirm against observed behavior.
    //
    // NOTE(review): mixes cub:: and hipcub:: wrappers below; this only
    // resolves on CUDA back ends where hipcub aliases cub.
    typedef cub::BlockLoad<uint64, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad64;
    typedef hipcub::BlockScan<int, BLOCK_THREADS> BlockScan;
    __shared__ union {
        typename BlockLoad64::TempStorage load64;
        typename BlockScan::TempStorage scan;
    } temp_storage;
    uint64 thread_data64[ITEMS_PER_THREAD];  // this block's values
    int thread_dataD[ITEMS_PER_THREAD];      // 1 = also present in partner list
    int thread_dataS[ITEMS_PER_THREAD];      // exclusive scan of the dupe flags
    int thread_dataR[ITEMS_PER_THREAD];      // merged rank of each value
    // Partner block's list, padded with UINT64_MAX past its m valid entries.
    __shared__ uint64 shared_B[BLOCK_THREADS * ITEMS_PER_THREAD];
    int blockID = blockIdx.x;
    int threadID = threadIdx.x;
    int offset = 1;
    while (offset < BLOCKS_NUM) {
        // A block participates while it still owns a live list: either as the
        // lower (keeper) member of a pair or as the upper (donor) member.
        if ( (blockID % (offset * 2) == 0 && blockID + offset < BLOCKS_NUM) ||
             (blockID % offset == 0 && blockID % (offset * 2) != 0) ) {
            BlockLoad64(temp_storage.load64).Load(input_d + blockID * BLOCK_THREADS * ITEMS_PER_THREAD, thread_data64);
            __syncthreads();
            int start_A, start_B;
            if (blockID % (offset * 2) == 0) {
                // Keeper: the partner is the block above; results stay here.
                start_A = blockID;
                start_B = blockID + offset;
            }
            if (blockID % (offset * 2) != 0) {
                // Donor: results are written into the keeper's region below.
                start_A = blockID;
                start_B = blockID - offset;
            }
            for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
                // Initial rank = index within this block's own list.
                thread_dataR[i] = threadID * ITEMS_PER_THREAD + i;
                thread_dataD[i] = 0;
                if (threadID * ITEMS_PER_THREAD + i < m)
                    shared_B[threadID * ITEMS_PER_THREAD + i] =
                            input_d[start_B * BLOCK_THREADS * ITEMS_PER_THREAD + threadID * ITEMS_PER_THREAD + i];
                else
                    shared_B[threadID * ITEMS_PER_THREAD + i] = UINT64_MAX;
            }
            __syncthreads();
            // Add each value's rank within the partner list and flag
            // cross-list duplicates.
            BlockGetRank<BLOCK_THREADS, ITEMS_PER_THREAD>(m, thread_data64, thread_dataR, thread_dataD, shared_B);
            __syncthreads();
            BlockScan(temp_storage.scan).ExclusiveSum(thread_dataD, thread_dataS);
            __syncthreads();
            // Shift ranks down by the number of duplicates preceding them.
            for (int i = 0; i < ITEMS_PER_THREAD; ++i)
                thread_dataR[i] -= thread_dataS[i];
            __syncthreads();
            // Keeper writes only its non-duplicate values; the donor writes
            // all of its ranked values, so each shared value is kept once.
            if (blockID % (offset * 2) == 0)
                for (int i = 0; i < ITEMS_PER_THREAD; ++i)
                    if (thread_dataD[i] != 1 && thread_dataR[i] < m)
                        input_d[start_A * BLOCK_THREADS * ITEMS_PER_THREAD + thread_dataR[i]] = thread_data64[i];
            if (blockID % (offset * 2) != 0)
                for (int i = 0; i < ITEMS_PER_THREAD; ++i)
                    if (thread_dataR[i] < m)
                        input_d[start_B * BLOCK_THREADS * ITEMS_PER_THREAD + thread_dataR[i]] = thread_data64[i];
            __syncthreads();
        }
        offset *= 2;
    }
}
/* Merge.
* Not Parallel Version Currently.
*/
/*
 * Merge two ascending, duplicate-free lists and keep the m smallest distinct
 * values, written back into output_h.
 * Preconditions: both arrays are sorted ascending and each provides at least
 * m readable elements (shorter logical lists are padded with UINT64_MAX), so
 * the scan below never advances either pointer past index m-1 before count
 * reaches m.
 */
void rMerge(const int m, uint64 *sketch_h, uint64 *output_h) {
    int pointer1 = 0, pointer2 = 0, count = 0;
    uint64 * bucket = new uint64[m];
    while (count < m) {
        if (sketch_h[pointer1] < output_h[pointer2]) {
            bucket[count++] = sketch_h[pointer1++];
        } else if (sketch_h[pointer1] > output_h[pointer2]) {
            bucket[count++] = output_h[pointer2++];
        } else {
            // Equal keys: keep a single copy and advance both inputs.
            bucket[count++] = sketch_h[pointer1++];
            pointer2 ++;
        }
    }
    // Use a signed index: m is an int, and the old uint64 counter mixed
    // signednesses in the loop condition.
    for (int i = 0; i < m; i++)
        output_h[i] = bucket[i];
    delete [] bucket;
}
/*
 * Build a MinHash-style signature for dnaList: for each of the t hash seeds
 * in hashes_b, compute the m smallest distinct k-mer hash values. The
 * sequence is processed in overlapping chunks sized for one grid launch;
 * per-chunk sketches are merged on the host with rMerge().
 */
signature genSig(const int k, const int m, const int t, char *dnaList, int length, uint64 *hashes_b) {
    const int BLOCKS_NUM = 16;
    const int BLOCK_THREADS = 32 * 16;
    const int ITEMS_PER_THREAD = 4;
    // Number of k-mer start positions covered by a single launch.
    const int CHUNK_SPAN = BLOCKS_NUM * BLOCK_THREADS * ITEMS_PER_THREAD;
    // Compute CHUNKS_NUM and the start, end and record index.
    signature sig(t, vector<uint64>(m, UINT64_MAX));
    int CHUNKS_NUM;
    if (length % CHUNK_SPAN == 0)
        CHUNKS_NUM = (length - k + 1) / CHUNK_SPAN;
    else
        CHUNKS_NUM = (length - k + 1) / CHUNK_SPAN + 1;
    int *record = (int *) malloc(sizeof(int) * CHUNKS_NUM);  // valid bases per chunk
    int *start = (int *) malloc(sizeof(int) * CHUNKS_NUM);   // chunk start in dnaList
    int *end = (int *) malloc(sizeof(int) * CHUNKS_NUM);     // chunk end (inclusive)
    for (int i = 0; i < (CHUNKS_NUM - 1); ++i) {
        record[i] = CHUNK_SPAN + k - 1;
    }
    record[CHUNKS_NUM - 1] = length - CHUNK_SPAN * (CHUNKS_NUM - 1);
    start[0] = 0;
    end[0] = record[0] - 1;
    // Chunks overlap by k-1 bases so no window is lost at a boundary.
    // (Was "CHUNKS_NUM >= 1", which wrote start[1] out of bounds whenever
    // there is only a single chunk.)
    if (CHUNKS_NUM > 1)
        start[1] = record[0] - k + 1;
    for (int i = 1; i < CHUNKS_NUM - 1; i++) {
        end[i] = start[i] + record[i] - 1;
        start[i + 1] = end[i] + 1 - k + 1;
    }
    end[CHUNKS_NUM - 1] = length - 1;
    // Initialization.
    hipError_t res;
    int numElem_dna = CHUNK_SPAN + k - 1;
    int numElem_list = CHUNK_SPAN;
    char *dna_h = (char *) malloc(sizeof(char) * numElem_dna);
    uint64 * output_h = (uint64 *) malloc(sizeof(uint64) * m);
    uint64 * sketch_h = (uint64 *) malloc(sizeof(uint64) * numElem_list);
    char *dna_d;
    uint64 * input_d;
    res = hipMalloc(&dna_d, sizeof(char) * numElem_dna);
    CHECK(res);
    res = hipMalloc(&input_d, sizeof(uint64) * numElem_list);
    CHECK(res);
    for (int j = 0; j < t; j++) {
        // Running sketch for seed j, padded with UINT64_MAX sentinels.
        for (int i = 0; i < m; ++i)
            output_h[i] = UINT64_MAX;
        for (int p = 0; p < CHUNKS_NUM; p++) {
            // Copy the chunk, padding the tail with the 'S' sentinel so
            // windows running off the sequence hash to UINT64_MAX.
            for (int i = 0; i < numElem_dna; i++) {
                if (i < record[p])
                    dna_h[i] = dnaList[i + start[p]];
                else
                    dna_h[i] = 'S';
            }
            res = hipMemcpy((void *) (dna_d), (void *) (dna_h), numElem_dna * sizeof(char), hipMemcpyHostToDevice);
            CHECK(res);
            getBlockSketch <BLOCK_THREADS, ITEMS_PER_THREAD> << < BLOCKS_NUM, BLOCK_THREADS >> >
                    (k, m, dna_d, input_d, numElem_dna, numElem_list, hashes_b[j]);
            // Surface launch errors immediately instead of at the next
            // synchronizing call (previously unchecked).
            res = hipGetLastError();
            CHECK(res);
            getAllSketch <BLOCKS_NUM, BLOCK_THREADS, ITEMS_PER_THREAD> << < BLOCKS_NUM, BLOCK_THREADS >> >
                    (m, input_d, numElem_list);
            res = hipGetLastError();
            CHECK(res);
            res = hipMemcpy((void *) (sketch_h), (void *) (input_d), numElem_list * sizeof(uint64), hipMemcpyDeviceToHost);
            CHECK(res);
            rMerge(m, sketch_h, output_h);
        }
        for (int i = 0; i < m; i++) {
            sig[j][i] = output_h[i];
        }
    }
    // Release host and device buffers (previously leaked).
    free(record);
    free(start);
    free(end);
    free(dna_h);
    free(output_h);
    free(sketch_h);
    hipFree((void *) dna_d);
    hipFree((void *) input_d);
    return sig;
}
| ffb874491ee31c46bc081fd6ed28ede359cd4d2f.cu | using namespace std;
//#define BLOCKS_NUM 5
//#define BLOCK_THREADS 128
//#define ITEMS_PER_THREAD 4
//#define CEILING_DIVIDE(X, Y) (1 + (((X) - 1) / (Y)))
#define CHECK(res) if(res!=cudaSuccess){printf("CHECK ERROR!\n");exit(-1);}
__device__
int base2int(char base) {
    // Map a nucleotide character to its 2-bit code; -1 for anything else
    // (including the 'S' padding sentinel).
    if (base == 'A') return 0;
    if (base == 'C') return 1;
    if (base == 'G') return 2;
    if (base == 'T') return 3;
    return -1;
}
__device__
uint64 getHashValue (uint64 *x, uint64 b, int k) {
    // Hash the packed k-mer buffer with SpookyHash; the buffer is
    // (k / 32 + 1) words of 8 bytes and b is the per-signature seed.
    return SpookyHash_d::Hash64(x, (k / 32 + 1) * 8, b);
}
/*
* Get hash value list on blocks
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockGetHashValues (const int k, char *dna_d, int thread_offset, uint64 *thread_dataR, uint64 hash_b) {
    // Produce one hash value per k-mer window for the ITEMS_PER_THREAD window
    // start positions owned by this thread, writing them to thread_dataR.
    // Windows that have touched the 'S' padding sentinel emit UINT64_MAX so
    // they sort to the end and are discarded downstream.
    //
    // Fixes relative to the previous revision:
    //  - hash_b widened from int to uint64: the caller passes a 64-bit seed
    //    and getHashValue() takes uint64, so the seed was being truncated.
    //  - the k >= 32 priming loop indexed cur_seq by the absolute position
    //    dna_index / 32 instead of the offset within the window, reading and
    //    writing far past the (k/32 + 1)-word buffer for every thread with
    //    thread_offset >= 32.
    int list_index = 0;
    int dna_index = thread_offset;
    bool isEnd = 0;
    // Current window packed 2 bits per base; the last word holds the k % 32
    // trailing bases in its low bits.
    uint64 *cur_seq = new uint64[k / 32 + 1]; // current sub-sequence
    for (int i = 0; i < k / 32 + 1; ++i)
        cur_seq[i] = 0;
    if (k < 32) {
        // Prime with the first k-1 bases; the emit loop below appends one
        // base per step before hashing.
        for (; dna_index < thread_offset + k - 1; ++dna_index) {
            if (dna_d[dna_index] == 'S')
                isEnd = 1;
            if (base2int(dna_d[dna_index]) != -1)
                cur_seq[0] = (cur_seq[0] << 2) % ((uint64) 1 << (2 * k)) + base2int(dna_d[dna_index]);
        }
        for (; dna_index < thread_offset + ITEMS_PER_THREAD + k - 1; ++dna_index) {
            if (dna_d[dna_index] == 'S')
                isEnd = 1;
            if (isEnd)
                thread_dataR[list_index++] = UINT64_MAX;
            else {
                if (base2int(dna_d[dna_index]) != -1)
                    cur_seq[0] = (cur_seq[0] << 2) % ((uint64) 1 << (2 * k)) + base2int(dna_d[dna_index]);
                thread_dataR[list_index++] = getHashValue(cur_seq, hash_b, k);
            }
        }
    } else {
        // Prime with the first k bases of this thread's window.
        for (; dna_index < thread_offset + k; ++dna_index) {
            if (dna_d[dna_index] == 'S')
                isEnd = 1;
            if (base2int(dna_d[dna_index]) != -1) {
                // Index by the base's offset inside the window, not by its
                // absolute position in dna_d (the old code overran cur_seq).
                int word = (dna_index - thread_offset) / 32;
                // The << 2 already wraps modulo 2^64; the old "% UINT64_MAX"
                // (i.e. modulo 2^64 - 1) was a no-op here.
                cur_seq[word] = (cur_seq[word] << 2) + base2int(dna_d[dna_index]);
            }
        }
        if (isEnd)
            thread_dataR[list_index++] = UINT64_MAX;
        else
            thread_dataR[list_index++] = getHashValue(cur_seq, hash_b, k);
        // Slide the window one base at a time: shift all words left by one
        // base, pulling the top base of the following word in, then append
        // the incoming base to the last word.
        // NOTE(review): when k % 32 == 0 the shift below is negative and the
        // mask is 1 << 0 — the packing scheme only works for k % 32 != 0;
        // confirm callers never pass a multiple of 32.
        for (; dna_index < thread_offset + ITEMS_PER_THREAD + k - 1; ++dna_index) {
            for (int j = 0; j < k / 32 - 1; ++j) {
                cur_seq[j] = (cur_seq[j] << 2) + (cur_seq[j + 1] >> 62);
            }
            cur_seq[k / 32 - 1] = (cur_seq[k / 32 - 1] << 2) + (cur_seq[k / 32] >> ((k % 32) * 2 - 2));
            if (dna_d[dna_index] == 'S')
                isEnd = 1;
            if (base2int(dna_d[dna_index]) != -1)
                cur_seq[k / 32] = (cur_seq[k / 32] << 2) % ((uint64) 1 << (2 * (k % 32))) +
                                  base2int(dna_d[dna_index]);
            if (isEnd)
                thread_dataR[list_index++] = UINT64_MAX;
            else
                thread_dataR[list_index++] = getHashValue(cur_seq, hash_b, k);
        }
    }
    delete [] cur_seq;
}
/*
* Remove duplicate values from sorted hash value list.
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockRemoveDupe (const int m, int *thread_dataS, int *thread_dataD,
                      uint64 *thread_dataR, uint64 *sketch) {
    // Compact this thread's first-occurrence values into the shared sketch.
    // thread_dataS[0] is the exclusive-scan offset of this thread's first
    // flagged item; only the first m distinct values block-wide are kept.
    int write_pos = thread_dataS[0];
    for (int item = 0; item < ITEMS_PER_THREAD; ++item) {
        bool is_head = (thread_dataD[item] == 1);
        if (is_head && write_pos < m) {
            sketch[write_pos] = thread_dataR[item];
            ++write_pos;
        }
    }
}
/*
* Get sketch of each block back to input_d
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockGetBack (const int m, int threadID, uint64 *thread_dataR, uint64 *sketch) {
    // Copy this thread's slice of the shared block sketch back into its
    // register array; slots at or beyond m are left untouched.
    int base = threadID * ITEMS_PER_THREAD;
    for (int item = 0; item < ITEMS_PER_THREAD; ++item) {
        int src = base + item;
        if (src < m)
            thread_dataR[item] = sketch[src];
    }
}
/*
* Core Function
* Firstly, get hash value list from DNA sequence.
* For example, if dna_d is [A,C,C,G,T,A,T,G,C,T,G,A,...].
* Then input_d should be [6,5,6,1,0,2,4,2,1,2,6,3,...].
* Secondly, block-sort the hash value list.
* For example, if input_d is [6,5,6,1,0,2,4,2,1,2,6,3,...].
* Then output is still input_d: [0,1,1,2,2,2,3,4,5,6,6,6,...].
* Thirdly, mark the non-duplicate values.
* For example, if input_d is [0,1,1,2,2,2,3,4,5,6,6,6,...].
* Then dupe_d should be [1,1,0,1,0,0,1,1,1,1,0,0,...].
* Then, scan dupe_d to find the offset of values in output.
* For example, if dupe_d is [1,1,0,1,0,0,1,1,1,1,0,0,...].
* Then scan_d should be [0,1,2,2,3,3,3,4,5,6,7,7,...].
* Finally, get sketches of each block.
*/
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__
void getBlockSketch (const int k, const int m, char *dna_d, uint64 *input_d,
                     int numElem_dna, int numElem_list, uint64 hash_b) {
    // Per-block pipeline: hash every k-mer window, sort the hashes, flag
    // first occurrences, scan the flags into compaction offsets, compact the
    // m smallest distinct hashes into shared memory, and write the block's
    // slice of input_d back.
    typedef cub::BlockStore<uint64, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> BlockStoreR;
    typedef cub::BlockRadixSort <uint64, BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSort;
    typedef cub::BlockDiscontinuity<uint64, BLOCK_THREADS> BlockDiscontinuity;
    typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
    // The collectives run in distinct phases separated by __syncthreads(),
    // so their temporary storage can legally share one union.
    __shared__ union {
        typename BlockStoreR::TempStorage store;
        typename BlockRadixSort::TempStorage sort;
        typename BlockDiscontinuity::TempStorage dupe;
        typename BlockScan::TempStorage scan;
    } temp_storage;
    uint64 thread_dataR[ITEMS_PER_THREAD];  // hash values / sketch slice
    int thread_dataD[ITEMS_PER_THREAD];     // 1 = first occurrence of a value
    int thread_dataS[ITEMS_PER_THREAD];     // exclusive scan of the flags
    // Block sketch, padded with UINT64_MAX for slots beyond the kept values.
    __shared__ uint64 sketch[BLOCK_THREADS * ITEMS_PER_THREAD];
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
        sketch[threadIdx.x * ITEMS_PER_THREAD + i] = UINT64_MAX;
    __syncthreads();
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    int thread_offset = (BLOCK_THREADS * blockIdx.x + threadIdx.x) * ITEMS_PER_THREAD;
    int threadID = threadIdx.x;
    BlockGetHashValues<BLOCK_THREADS, ITEMS_PER_THREAD>(k, dna_d, thread_offset, thread_dataR, hash_b);
    __syncthreads();
    BlockRadixSort(temp_storage.sort).Sort(thread_dataR);
    __syncthreads();
    BlockDiscontinuity(temp_storage.dupe).FlagHeads(thread_dataD, thread_dataR, cub::Inequality());
    __syncthreads();
    BlockScan(temp_storage.scan).ExclusiveSum(thread_dataD, thread_dataS);
    __syncthreads();
    BlockRemoveDupe<BLOCK_THREADS, ITEMS_PER_THREAD>(m, thread_dataS, thread_dataD, thread_dataR, sketch);
    __syncthreads();
    BlockGetBack<BLOCK_THREADS, ITEMS_PER_THREAD>(m, threadID, thread_dataR, sketch);
    __syncthreads();
    // Entries past index m still hold sorted hashes; the merge kernel pads
    // partner reads with UINT64_MAX beyond m, so they are ignored.
    BlockStoreR(temp_storage.store).Store(input_d + block_offset, thread_dataR);
}
/*
* Get ranks and dupe-marks of list A
* */
template<int BLOCK_THREADS, int ITEMS_PER_THREAD>
__device__
void BlockGetRank(int m, uint64 *thread_data64, int *thread_dataR, int *thread_dataD, uint64 *shared_B) {
    // For each of this thread's keys, binary-search shared_B (ascending,
    // m valid entries, UINT64_MAX padding beyond) for the first element
    // >= key. That position is added to thread_dataR[i] as the key's rank
    // contribution from the partner list; thread_dataD[i] is set to 1 when
    // the key also occurs among the valid entries (a cross-list duplicate).
    for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
        uint64 key = thread_data64[i];
        int left = 0;
        int right = m - 1;
        int median;
        int result = m;
        while (left <= right) {
            median = (left + right) / 2;
            if (shared_B[median] >= key) {
                result = median;
                right = median - 1;
            }
            if (shared_B[median] < key) {
                left = median + 1;
            }
        }
        thread_dataD[i] = 0;
        // Guard the equality probe: when key exceeds every valid element,
        // result == m and the unguarded shared_B[m] access read past the
        // valid range (out of bounds when m fills the whole tile). Items
        // affected by the guard end up with rank >= m and are dropped by the
        // caller either way.
        if (result < m && shared_B[result] == key) {
            thread_dataD[i] = 1;
        }
        thread_dataR[i] += result;
    }
}
/*
* Merge between blocks. (Binary-merge)
* Calculate offsets of each value and store them to rank_d.
* Then, write back to input_d.
* */
template<int BLOCKS_NUM, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__
void getAllSketch (const int m, uint64 *input_d, int numElem_list) {
    // Tree-merge the per-block sketches stored in input_d. In the round with
    // step "offset", block pairs (A, A +/- offset) merge their sorted,
    // deduplicated m-value lists and the m smallest distinct values end up in
    // the lower block's region; after log2(BLOCKS_NUM) rounds block 0 holds
    // the global sketch.
    //
    // NOTE(review): rounds communicate through global memory but only
    // __syncthreads() (block-local) is used between them — there is no
    // grid-wide barrier, so a block may read its partner's region before the
    // partner finished writing it in the previous round. A cooperative launch
    // with grid sync, or one kernel launch per round, would be needed to make
    // this race-free; confirm against observed behavior.
    typedef cub::BlockLoad<uint64, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad64;
    typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
    __shared__ union {
        typename BlockLoad64::TempStorage load64;
        typename BlockScan::TempStorage scan;
    } temp_storage;
    uint64 thread_data64[ITEMS_PER_THREAD];  // this block's values
    int thread_dataD[ITEMS_PER_THREAD];      // 1 = also present in partner list
    int thread_dataS[ITEMS_PER_THREAD];      // exclusive scan of the dupe flags
    int thread_dataR[ITEMS_PER_THREAD];      // merged rank of each value
    // Partner block's list, padded with UINT64_MAX past its m valid entries.
    __shared__ uint64 shared_B[BLOCK_THREADS * ITEMS_PER_THREAD];
    int blockID = blockIdx.x;
    int threadID = threadIdx.x;
    int offset = 1;
    while (offset < BLOCKS_NUM) {
        // A block participates while it still owns a live list: either as the
        // lower (keeper) member of a pair or as the upper (donor) member.
        if ( (blockID % (offset * 2) == 0 && blockID + offset < BLOCKS_NUM) ||
             (blockID % offset == 0 && blockID % (offset * 2) != 0) ) {
            BlockLoad64(temp_storage.load64).Load(input_d + blockID * BLOCK_THREADS * ITEMS_PER_THREAD, thread_data64);
            __syncthreads();
            int start_A, start_B;
            if (blockID % (offset * 2) == 0) {
                // Keeper: the partner is the block above; results stay here.
                start_A = blockID;
                start_B = blockID + offset;
            }
            if (blockID % (offset * 2) != 0) {
                // Donor: results are written into the keeper's region below.
                start_A = blockID;
                start_B = blockID - offset;
            }
            for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
                // Initial rank = index within this block's own list.
                thread_dataR[i] = threadID * ITEMS_PER_THREAD + i;
                thread_dataD[i] = 0;
                if (threadID * ITEMS_PER_THREAD + i < m)
                    shared_B[threadID * ITEMS_PER_THREAD + i] =
                            input_d[start_B * BLOCK_THREADS * ITEMS_PER_THREAD + threadID * ITEMS_PER_THREAD + i];
                else
                    shared_B[threadID * ITEMS_PER_THREAD + i] = UINT64_MAX;
            }
            __syncthreads();
            // Add each value's rank within the partner list and flag
            // cross-list duplicates.
            BlockGetRank<BLOCK_THREADS, ITEMS_PER_THREAD>(m, thread_data64, thread_dataR, thread_dataD, shared_B);
            __syncthreads();
            BlockScan(temp_storage.scan).ExclusiveSum(thread_dataD, thread_dataS);
            __syncthreads();
            // Shift ranks down by the number of duplicates preceding them.
            for (int i = 0; i < ITEMS_PER_THREAD; ++i)
                thread_dataR[i] -= thread_dataS[i];
            __syncthreads();
            // Keeper writes only its non-duplicate values; the donor writes
            // all of its ranked values, so each shared value is kept once.
            if (blockID % (offset * 2) == 0)
                for (int i = 0; i < ITEMS_PER_THREAD; ++i)
                    if (thread_dataD[i] != 1 && thread_dataR[i] < m)
                        input_d[start_A * BLOCK_THREADS * ITEMS_PER_THREAD + thread_dataR[i]] = thread_data64[i];
            if (blockID % (offset * 2) != 0)
                for (int i = 0; i < ITEMS_PER_THREAD; ++i)
                    if (thread_dataR[i] < m)
                        input_d[start_B * BLOCK_THREADS * ITEMS_PER_THREAD + thread_dataR[i]] = thread_data64[i];
            __syncthreads();
        }
        offset *= 2;
    }
}
/* Merge.
* Not Parallel Version Currently.
*/
/*
 * Merge two ascending, duplicate-free lists and keep the m smallest distinct
 * values, written back into output_h.
 * Preconditions: both arrays are sorted ascending and each provides at least
 * m readable elements (shorter logical lists are padded with UINT64_MAX), so
 * the scan below never advances either pointer past index m-1 before count
 * reaches m.
 */
void rMerge(const int m, uint64 *sketch_h, uint64 *output_h) {
    int pointer1 = 0, pointer2 = 0, count = 0;
    uint64 * bucket = new uint64[m];
    while (count < m) {
        if (sketch_h[pointer1] < output_h[pointer2]) {
            bucket[count++] = sketch_h[pointer1++];
        } else if (sketch_h[pointer1] > output_h[pointer2]) {
            bucket[count++] = output_h[pointer2++];
        } else {
            // Equal keys: keep a single copy and advance both inputs.
            bucket[count++] = sketch_h[pointer1++];
            pointer2 ++;
        }
    }
    // Use a signed index: m is an int, and the old uint64 counter mixed
    // signednesses in the loop condition.
    for (int i = 0; i < m; i++)
        output_h[i] = bucket[i];
    delete [] bucket;
}
/*
 * Build a MinHash-style signature for dnaList: for each of the t hash seeds
 * in hashes_b, compute the m smallest distinct k-mer hash values. The
 * sequence is processed in overlapping chunks sized for one grid launch;
 * per-chunk sketches are merged on the host with rMerge().
 */
signature genSig(const int k, const int m, const int t, char *dnaList, int length, uint64 *hashes_b) {
    const int BLOCKS_NUM = 16;
    const int BLOCK_THREADS = 32 * 16;
    const int ITEMS_PER_THREAD = 4;
    // Number of k-mer start positions covered by a single launch.
    const int CHUNK_SPAN = BLOCKS_NUM * BLOCK_THREADS * ITEMS_PER_THREAD;
    // Compute CHUNKS_NUM and the start, end and record index.
    signature sig(t, vector<uint64>(m, UINT64_MAX));
    int CHUNKS_NUM;
    if (length % CHUNK_SPAN == 0)
        CHUNKS_NUM = (length - k + 1) / CHUNK_SPAN;
    else
        CHUNKS_NUM = (length - k + 1) / CHUNK_SPAN + 1;
    int *record = (int *) malloc(sizeof(int) * CHUNKS_NUM);  // valid bases per chunk
    int *start = (int *) malloc(sizeof(int) * CHUNKS_NUM);   // chunk start in dnaList
    int *end = (int *) malloc(sizeof(int) * CHUNKS_NUM);     // chunk end (inclusive)
    for (int i = 0; i < (CHUNKS_NUM - 1); ++i) {
        record[i] = CHUNK_SPAN + k - 1;
    }
    record[CHUNKS_NUM - 1] = length - CHUNK_SPAN * (CHUNKS_NUM - 1);
    start[0] = 0;
    end[0] = record[0] - 1;
    // Chunks overlap by k-1 bases so no window is lost at a boundary.
    // (Was "CHUNKS_NUM >= 1", which wrote start[1] out of bounds whenever
    // there is only a single chunk.)
    if (CHUNKS_NUM > 1)
        start[1] = record[0] - k + 1;
    for (int i = 1; i < CHUNKS_NUM - 1; i++) {
        end[i] = start[i] + record[i] - 1;
        start[i + 1] = end[i] + 1 - k + 1;
    }
    end[CHUNKS_NUM - 1] = length - 1;
    // Initialization.
    cudaError_t res;
    int numElem_dna = CHUNK_SPAN + k - 1;
    int numElem_list = CHUNK_SPAN;
    char *dna_h = (char *) malloc(sizeof(char) * numElem_dna);
    uint64 * output_h = (uint64 *) malloc(sizeof(uint64) * m);
    uint64 * sketch_h = (uint64 *) malloc(sizeof(uint64) * numElem_list);
    char *dna_d;
    uint64 * input_d;
    res = cudaMalloc(&dna_d, sizeof(char) * numElem_dna);
    CHECK(res);
    res = cudaMalloc(&input_d, sizeof(uint64) * numElem_list);
    CHECK(res);
    for (int j = 0; j < t; j++) {
        // Running sketch for seed j, padded with UINT64_MAX sentinels.
        for (int i = 0; i < m; ++i)
            output_h[i] = UINT64_MAX;
        for (int p = 0; p < CHUNKS_NUM; p++) {
            // Copy the chunk, padding the tail with the 'S' sentinel so
            // windows running off the sequence hash to UINT64_MAX.
            for (int i = 0; i < numElem_dna; i++) {
                if (i < record[p])
                    dna_h[i] = dnaList[i + start[p]];
                else
                    dna_h[i] = 'S';
            }
            res = cudaMemcpy((void *) (dna_d), (void *) (dna_h), numElem_dna * sizeof(char), cudaMemcpyHostToDevice);
            CHECK(res);
            getBlockSketch <BLOCK_THREADS, ITEMS_PER_THREAD> << < BLOCKS_NUM, BLOCK_THREADS >> >
                    (k, m, dna_d, input_d, numElem_dna, numElem_list, hashes_b[j]);
            // Surface launch errors immediately instead of at the next
            // synchronizing call (previously unchecked).
            res = cudaGetLastError();
            CHECK(res);
            getAllSketch <BLOCKS_NUM, BLOCK_THREADS, ITEMS_PER_THREAD> << < BLOCKS_NUM, BLOCK_THREADS >> >
                    (m, input_d, numElem_list);
            res = cudaGetLastError();
            CHECK(res);
            res = cudaMemcpy((void *) (sketch_h), (void *) (input_d), numElem_list * sizeof(uint64), cudaMemcpyDeviceToHost);
            CHECK(res);
            rMerge(m, sketch_h, output_h);
        }
        for (int i = 0; i < m; i++) {
            sig[j][i] = output_h[i];
        }
    }
    // Release host and device buffers (previously leaked).
    free(record);
    free(start);
    free(end);
    free(dna_h);
    free(output_h);
    free(sketch_h);
    cudaFree((void *) dna_d);
    cudaFree((void *) input_d);
    return sig;
}
|
e72c7fe64dd2719a4beea338e92264dc3ceadc10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/piecewise_linear_transform_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
namespace caffe2 {
namespace {
__global__ void PieceWiseLinearTransformGeneralKernel(
    const int N,
    const int M,
    const int num_grp,
    const int num_fnc_per_grp,
    const float* bounds,
    const float* slopes,
    const float* intercepts,
    const float* X,
    float* Y) {
  // One piecewise-linear function group per column of the N x M input.
  // Values are clamped below the first bound and above the last; otherwise
  // the containing segment is located by binary search over the bounds.
  CUDA_1D_KERNEL_LOOP(i, N * M) {
    const int col = i % M;
    const float* grp_bounds = bounds + col * (num_fnc_per_grp + 1);
    const float* grp_slopes = slopes + col * num_fnc_per_grp;
    const float* grp_intercepts = intercepts + col * num_fnc_per_grp;
    const float x = X[i];
    if (x <= grp_bounds[0]) {
      Y[i] = grp_slopes[0] * grp_bounds[0] + grp_intercepts[0];
    } else if (x >= grp_bounds[num_fnc_per_grp]) {
      Y[i] = grp_slopes[num_fnc_per_grp - 1] * grp_bounds[num_fnc_per_grp] +
          grp_intercepts[num_fnc_per_grp - 1];
    } else {
      const float* pos = thrust::lower_bound(
          thrust::device,
          grp_bounds,
          grp_bounds + num_fnc_per_grp + 1,
          x);
      const int seg = static_cast<int>(pos - grp_bounds) - 1;
      Y[i] = grp_slopes[seg] * x + grp_intercepts[seg];
    }
  }
}
} // namespace
namespace {
__global__ void PieceWiseLinearTransformBinaryKernel1(
    const int N,
    const int M,
    const int num_grp,
    const int num_fnc_per_grp,
    const float* bounds,
    const float* slopes,
    const float* intercepts,
    const float* X,
    float* Y) {
  // Binary mode, Nx1 input: a single function group shared by all elements.
  // Clamp outside [bounds[0], bounds[num_fnc_per_grp]]; otherwise binary
  // search for the segment containing X[i].
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float x = X[i];
    if (x <= bounds[0]) {
      Y[i] = slopes[0] * bounds[0] + intercepts[0];
    } else if (x >= bounds[num_fnc_per_grp]) {
      Y[i] = slopes[num_fnc_per_grp - 1] * bounds[num_fnc_per_grp] +
          intercepts[num_fnc_per_grp - 1];
    } else {
      const float* pos = thrust::lower_bound(
          thrust::device, bounds, bounds + num_fnc_per_grp + 1, x);
      const int seg = static_cast<int>(pos - bounds) - 1;
      Y[i] = slopes[seg] * x + intercepts[seg];
    }
  }
}
} // namespace
namespace {
__global__ void PieceWiseLinearTransformBinaryKernel2(
    const int N,
    const int M,
    const int num_grp,
    const int num_fnc_per_grp,
    const float* bounds,
    const float* slopes,
    const float* intercepts,
    const float* X,
    float* Y) {
  // Binary mode, Nx2 input: only column 1 (the positive-class score) is
  // transformed; column 0 is set to its complement. One thread per row,
  // hence the loop bound of N rather than N * M (M == 2 here).
  CUDA_1D_KERNEL_LOOP(row, N) {
    const int base = row * M;
    const float x = X[base + 1];
    float y;
    if (x <= bounds[0]) {
      y = slopes[0] * bounds[0] + intercepts[0];
    } else if (x >= bounds[num_fnc_per_grp]) {
      y = slopes[num_fnc_per_grp - 1] * bounds[num_fnc_per_grp] +
          intercepts[num_fnc_per_grp - 1];
    } else {
      const float* pos = thrust::lower_bound(
          thrust::device, bounds, bounds + num_fnc_per_grp + 1, x);
      const int seg = static_cast<int>(pos - bounds) - 1;
      y = slopes[seg] * x + intercepts[seg];
    }
    Y[base + 1] = y;
    Y[base] = 1.0f - y;
  }
}
} // namespace
template <>
void PiecewiseLinearTransformOp<float, CUDAContext>::setUpTensors(
    int64_t& num_func_per_group,
    int64_t& num_group,
    int64_t M) {
  // Stage the piecewise-linear parameters (bounds, slopes, intercepts) in
  // device memory and report the parameter layout to the caller.
  // Outputs: num_func_per_group = segments per function group;
  //          num_group = number of groups (enforced to be 1 in binary mode,
  //          one per input column otherwise).
  if (transform_param_from_arg_) {
    // Parameters came from operator arguments: upload once, then reuse
    // across calls (guarded by gpu_copied_).
    if (!gpu_copied_) {
      int64_t num_bounds;
      int64_t num_slopes;
      int64_t num_intercepts;
      CAFFE_ENFORCE_EQ(InputSize(), 1);
      const float* bounds;
      const float* slopes;
      const float* intercepts;
      bounds = bounds_from_arg_.data();
      slopes = slopes_from_arg_.data();
      intercepts = intercepts_from_arg_.data();
      num_bounds = bounds_from_arg_.size();
      num_slopes = slopes_from_arg_.size();
      num_intercepts = intercepts_from_arg_.size();
      InferNumFunctionsPerGroup(
          num_bounds,
          num_slopes,
          num_intercepts,
          &num_func_per_group,
          &num_group);
      if (binary_) {
        CAFFE_ENFORCE_EQ(num_group, 1);
      } else {
        CAFFE_ENFORCE_EQ(num_group, M);
      }
      // NOTE(review): int64_t product narrowed to int; fine for typical
      // parameter counts but would overflow for very large parameter sets.
      int length = num_group * num_func_per_group;
      // Stage each parameter array in a CPU tensor, then copy to the device.
      // Each group carries num_func_per_group + 1 bounds, hence the extra
      // num_group elements in the bounds tensor.
      Tensor bounds_host{CPU};
      bounds_host.Resize(length + num_group);
      memcpy(
          bounds_host.mutable_data<float>(),
          bounds,
          (length + num_group) * sizeof(float));
      Tensor intercepts_host{CPU};
      intercepts_host.Resize(length);
      memcpy(
          intercepts_host.mutable_data<float>(),
          intercepts,
          (length) * sizeof(float));
      Tensor slopes_host{CPU};
      slopes_host.Resize(length);
      memcpy(
          slopes_host.mutable_data<float>(), slopes, (length) * sizeof(float));
      bounds_device_.CopyFrom(bounds_host);
      intercepts_device_.CopyFrom(intercepts_host);
      slopes_device_.CopyFrom(slopes_host);
      gpu_copied_ = true;
    }
  } else {
    // Parameters arrive as input tensors; they may change between runs, so
    // copy them to the device on every call.
    int64_t num_bounds;
    int64_t num_slopes;
    int64_t num_intercepts;
    CAFFE_ENFORCE_EQ(InputSize(), 4);
    auto& bounds_input = Input(BOUNDS);
    auto& slopes_input = Input(SLOPES);
    auto& intercepts_input = Input(INTERCEPTS);
    num_bounds = bounds_input.numel();
    num_slopes = slopes_input.numel();
    num_intercepts = intercepts_input.numel();
    InferNumFunctionsPerGroup(
        num_bounds,
        num_slopes,
        num_intercepts,
        &num_func_per_group,
        &num_group);
    if (binary_) {
      CAFFE_ENFORCE_EQ(num_group, 1);
    } else {
      CAFFE_ENFORCE_EQ(num_group, M);
    }
    bounds_device_.CopyFrom(bounds_input);
    slopes_device_.CopyFrom(slopes_input);
    intercepts_device_.CopyFrom(intercepts_input);
  }
}
template <>
bool PiecewiseLinearTransformOp<float, CUDAContext>::TransformGeneral() {
  // General (non-binary) path: X is N x M and each column has its own
  // piecewise-linear function group. Launches one GPU thread per element.
  auto& X = Input(0);
  CAFFE_ENFORCE_EQ(X.dim(), 2);
  int64_t N = X.dim32(0);
  int64_t M = X.dim32(1);
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  int64_t num_func_per_group;
  int64_t num_group;
  setUpTensors(num_func_per_group, num_group, M);
  // NOTE(review): no hipGetLastError() after the launch; a bad launch
  // configuration would only surface at a later synchronizing call.
  hipLaunchKernelGGL(( PieceWiseLinearTransformGeneralKernel),
      dim3(CAFFE_GET_BLOCKS(X.numel())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      N,
      M,
      num_group,
      num_func_per_group,
      bounds_device_.data<float>(),
      slopes_device_.data<float>(),
      intercepts_device_.data<float>(),
      X.data<float>(),
      Y->template mutable_data<float>());
  return true;
}
template <>
bool PiecewiseLinearTransformOp<float, CUDAContext>::TransformBinary() {
  // Binary path: a single function group. For Nx1 input every element is
  // transformed directly (Kernel1); for Nx2 input only column 1 is
  // transformed and column 0 becomes its complement (Kernel2, launched with
  // one thread per row, i.e. half of X.numel()).
  auto& X = Input(0);
  CAFFE_ENFORCE(X.dim() == 1 || X.dim() == 2);
  int64_t N = X.dim32(0);
  int64_t M = X.dim() == 2 ? X.dim32(1) : 1;
  CAFFE_ENFORCE(
      M == 1 || M == 2,
      "If binary is set to true, the input must be Nx2 or Nx1 tensor");
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  int64_t num_func_per_group;
  int64_t num_group;
  setUpTensors(num_func_per_group, num_group, M);
  // NOTE(review): no hipGetLastError() after either launch; launch failures
  // would only surface at a later synchronizing call.
  if (M == 1) {
    hipLaunchKernelGGL(( PieceWiseLinearTransformBinaryKernel1),
        dim3(CAFFE_GET_BLOCKS(X.numel())),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        M,
        num_group,
        num_func_per_group,
        bounds_device_.data<float>(),
        slopes_device_.data<float>(),
        intercepts_device_.data<float>(),
        X.data<float>(),
        Y->template mutable_data<float>());
  } else {
    // don't want N*M threads, only N*M/2
    hipLaunchKernelGGL(( PieceWiseLinearTransformBinaryKernel2),
        dim3(CAFFE_GET_BLOCKS(X.numel() / 2)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        M,
        num_group,
        num_func_per_group,
        bounds_device_.data<float>(),
        slopes_device_.data<float>(),
        intercepts_device_.data<float>(),
        X.data<float>(),
        Y->template mutable_data<float>());
  }
  return true;
}
// Register the GPU implementation under the standard operator name.
REGISTER_CUDA_OPERATOR(
    PiecewiseLinearTransform,
    PiecewiseLinearTransformOp<float, CUDAContext>);
} // namespace caffe2
| e72c7fe64dd2719a4beea338e92264dc3ceadc10.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/piecewise_linear_transform_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
namespace caffe2 {
namespace {
__global__ void PieceWiseLinearTransformGeneralKernel(
    const int N,
    const int M,
    const int num_grp,
    const int num_fnc_per_grp,
    const float* bounds,
    const float* slopes,
    const float* intercepts,
    const float* X,
    float* Y) {
  // One piecewise-linear function group per column of the N x M input.
  // Values are clamped below the first bound and above the last; otherwise
  // the containing segment is located by binary search over the bounds.
  CUDA_1D_KERNEL_LOOP(i, N * M) {
    const int col = i % M;
    const float* grp_bounds = bounds + col * (num_fnc_per_grp + 1);
    const float* grp_slopes = slopes + col * num_fnc_per_grp;
    const float* grp_intercepts = intercepts + col * num_fnc_per_grp;
    const float x = X[i];
    if (x <= grp_bounds[0]) {
      Y[i] = grp_slopes[0] * grp_bounds[0] + grp_intercepts[0];
    } else if (x >= grp_bounds[num_fnc_per_grp]) {
      Y[i] = grp_slopes[num_fnc_per_grp - 1] * grp_bounds[num_fnc_per_grp] +
          grp_intercepts[num_fnc_per_grp - 1];
    } else {
      const float* pos = thrust::lower_bound(
          thrust::device,
          grp_bounds,
          grp_bounds + num_fnc_per_grp + 1,
          x);
      const int seg = static_cast<int>(pos - grp_bounds) - 1;
      Y[i] = grp_slopes[seg] * x + grp_intercepts[seg];
    }
  }
}
} // namespace
namespace {
__global__ void PieceWiseLinearTransformBinaryKernel1(
    const int N,
    const int M,
    const int num_grp,
    const int num_fnc_per_grp,
    const float* bounds,
    const float* slopes,
    const float* intercepts,
    const float* X,
    float* Y) {
  // Binary mode, Nx1 input: a single function group shared by all elements.
  // Clamp outside [bounds[0], bounds[num_fnc_per_grp]]; otherwise binary
  // search for the segment containing X[i].
  CUDA_1D_KERNEL_LOOP(i, N) {
    const float x = X[i];
    if (x <= bounds[0]) {
      Y[i] = slopes[0] * bounds[0] + intercepts[0];
    } else if (x >= bounds[num_fnc_per_grp]) {
      Y[i] = slopes[num_fnc_per_grp - 1] * bounds[num_fnc_per_grp] +
          intercepts[num_fnc_per_grp - 1];
    } else {
      const float* pos = thrust::lower_bound(
          thrust::device, bounds, bounds + num_fnc_per_grp + 1, x);
      const int seg = static_cast<int>(pos - bounds) - 1;
      Y[i] = slopes[seg] * x + intercepts[seg];
    }
  }
}
} // namespace
namespace {
__global__ void PieceWiseLinearTransformBinaryKernel2(
    const int N,
    const int M,
    const int num_grp,
    const int num_fnc_per_grp,
    const float* bounds,
    const float* slopes,
    const float* intercepts,
    const float* X,
    float* Y) {
  // Binary mode, Nx2 input: only column 1 (the positive-class score) is
  // transformed; column 0 is set to its complement. One thread per row,
  // hence the loop bound of N rather than N * M (M == 2 here).
  CUDA_1D_KERNEL_LOOP(row, N) {
    const int base = row * M;
    const float x = X[base + 1];
    float y;
    if (x <= bounds[0]) {
      y = slopes[0] * bounds[0] + intercepts[0];
    } else if (x >= bounds[num_fnc_per_grp]) {
      y = slopes[num_fnc_per_grp - 1] * bounds[num_fnc_per_grp] +
          intercepts[num_fnc_per_grp - 1];
    } else {
      const float* pos = thrust::lower_bound(
          thrust::device, bounds, bounds + num_fnc_per_grp + 1, x);
      const int seg = static_cast<int>(pos - bounds) - 1;
      y = slopes[seg] * x + intercepts[seg];
    }
    Y[base + 1] = y;
    Y[base] = 1.0f - y;
  }
}
} // namespace
template <>
void PiecewiseLinearTransformOp<float, CUDAContext>::setUpTensors(
    int64_t& num_func_per_group,
    int64_t& num_group,
    int64_t M) {
  // Stage the piecewise-linear parameters (bounds, slopes, intercepts) in
  // device memory and report the parameter layout to the caller.
  // Outputs: num_func_per_group = segments per function group;
  //          num_group = number of groups (enforced to be 1 in binary mode,
  //          one per input column otherwise).
  if (transform_param_from_arg_) {
    // Parameters came from operator arguments: upload once, then reuse
    // across calls (guarded by gpu_copied_).
    if (!gpu_copied_) {
      int64_t num_bounds;
      int64_t num_slopes;
      int64_t num_intercepts;
      CAFFE_ENFORCE_EQ(InputSize(), 1);
      const float* bounds;
      const float* slopes;
      const float* intercepts;
      bounds = bounds_from_arg_.data();
      slopes = slopes_from_arg_.data();
      intercepts = intercepts_from_arg_.data();
      num_bounds = bounds_from_arg_.size();
      num_slopes = slopes_from_arg_.size();
      num_intercepts = intercepts_from_arg_.size();
      InferNumFunctionsPerGroup(
          num_bounds,
          num_slopes,
          num_intercepts,
          &num_func_per_group,
          &num_group);
      if (binary_) {
        CAFFE_ENFORCE_EQ(num_group, 1);
      } else {
        CAFFE_ENFORCE_EQ(num_group, M);
      }
      // NOTE(review): int64_t product narrowed to int; fine for typical
      // parameter counts but would overflow for very large parameter sets.
      int length = num_group * num_func_per_group;
      // Stage each parameter array in a CPU tensor, then copy to the device.
      // Each group carries num_func_per_group + 1 bounds, hence the extra
      // num_group elements in the bounds tensor.
      Tensor bounds_host{CPU};
      bounds_host.Resize(length + num_group);
      memcpy(
          bounds_host.mutable_data<float>(),
          bounds,
          (length + num_group) * sizeof(float));
      Tensor intercepts_host{CPU};
      intercepts_host.Resize(length);
      memcpy(
          intercepts_host.mutable_data<float>(),
          intercepts,
          (length) * sizeof(float));
      Tensor slopes_host{CPU};
      slopes_host.Resize(length);
      memcpy(
          slopes_host.mutable_data<float>(), slopes, (length) * sizeof(float));
      bounds_device_.CopyFrom(bounds_host);
      intercepts_device_.CopyFrom(intercepts_host);
      slopes_device_.CopyFrom(slopes_host);
      gpu_copied_ = true;
    }
  } else {
    // Parameters arrive as input tensors; they may change between runs, so
    // copy them to the device on every call.
    int64_t num_bounds;
    int64_t num_slopes;
    int64_t num_intercepts;
    CAFFE_ENFORCE_EQ(InputSize(), 4);
    auto& bounds_input = Input(BOUNDS);
    auto& slopes_input = Input(SLOPES);
    auto& intercepts_input = Input(INTERCEPTS);
    num_bounds = bounds_input.numel();
    num_slopes = slopes_input.numel();
    num_intercepts = intercepts_input.numel();
    InferNumFunctionsPerGroup(
        num_bounds,
        num_slopes,
        num_intercepts,
        &num_func_per_group,
        &num_group);
    if (binary_) {
      CAFFE_ENFORCE_EQ(num_group, 1);
    } else {
      CAFFE_ENFORCE_EQ(num_group, M);
    }
    bounds_device_.CopyFrom(bounds_input);
    slopes_device_.CopyFrom(slopes_input);
    intercepts_device_.CopyFrom(intercepts_input);
  }
}
template <>
bool PiecewiseLinearTransformOp<float, CUDAContext>::TransformGeneral() {
  // General (multi-group) path: each of the M columns of the NxM input has
  // its own group of piecewise-linear functions.
  auto& X = Input(0);
  CAFFE_ENFORCE_EQ(X.dim(), 2);
  int64_t N = X.dim32(0);
  int64_t M = X.dim32(1);
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  int64_t num_func_per_group;
  int64_t num_group;
  // Stage bounds/slopes/intercepts on the device (cached when they come
  // from operator arguments).
  setUpTensors(num_func_per_group, num_group, M);
  // One thread per element of X.
  PieceWiseLinearTransformGeneralKernel<<<
      CAFFE_GET_BLOCKS(X.numel()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      N,
      M,
      num_group,
      num_func_per_group,
      bounds_device_.data<float>(),
      slopes_device_.data<float>(),
      intercepts_device_.data<float>(),
      X.data<float>(),
      Y->template mutable_data<float>());
  return true;
}
template <>
bool PiecewiseLinearTransformOp<float, CUDAContext>::TransformBinary() {
  // Binary path: a single group of functions shared by all rows. Accepts an
  // Nx1 (or plain 1-D) score tensor, or an Nx2 tensor whose two columns are
  // complementary probabilities.
  auto& X = Input(0);
  CAFFE_ENFORCE(X.dim() == 1 || X.dim() == 2);
  int64_t N = X.dim32(0);
  int64_t M = X.dim() == 2 ? X.dim32(1) : 1;
  CAFFE_ENFORCE(
      M == 1 || M == 2,
      "If binary is set to true, the input must be Nx2 or Nx1 tensor");
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  int64_t num_func_per_group;
  int64_t num_group;
  setUpTensors(num_func_per_group, num_group, M);
  if (M == 1) {
    // One thread per element.
    PieceWiseLinearTransformBinaryKernel1<<<
        CAFFE_GET_BLOCKS(X.numel()),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        N,
        M,
        num_group,
        num_func_per_group,
        bounds_device_.data<float>(),
        slopes_device_.data<float>(),
        intercepts_device_.data<float>(),
        X.data<float>(),
        Y->template mutable_data<float>());
  } else {
    // don't want N*M threads, only N*M/2: each thread transforms column 1
    // and derives column 0 as its complement inside the kernel.
    PieceWiseLinearTransformBinaryKernel2<<<
        CAFFE_GET_BLOCKS(X.numel() / 2),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        N,
        M,
        num_group,
        num_func_per_group,
        bounds_device_.data<float>(),
        slopes_device_.data<float>(),
        intercepts_device_.data<float>(),
        X.data<float>(),
        Y->template mutable_data<float>());
  }
  return true;
}
// Expose the CUDA implementation under the standard operator name.
REGISTER_CUDA_OPERATOR(
    PiecewiseLinearTransform,
    PiecewiseLinearTransformOp<float, CUDAContext>);
} // namespace caffe2
|
8c40e3e344f74f7ab125a85d668ab07fcc6a121f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel: conditionally perturbs `comp`
 * through chains of libm calls and prints the final value so results can be
 * compared across compilers/targets.
 * NOTE(review): var_14 is written on the device and indexed up to var_3-1,
 * while the harness allocates only 10 host floats -- confirm the launch site
 * passes a device buffer of sufficient size. */
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
for (int i=0; i < var_1; ++i) {
  for (int i=0; i < var_2; ++i) {
    if (comp <= -1.6060E36f / ldexpf(+1.5617E35f, 2)) {
      comp += (+1.3779E36f - floorf(-1.2482E-43f));
      comp = fmodf(-1.5149E36f / var_4, sinhf(-0.0f + (var_5 + var_6 * +1.0563E35f + (-1.6218E-43f + -1.5339E-35f))));
      float tmp_1 = acosf(+1.3883E20f * atanf((+0.0f + (var_7 / var_8 / var_9))));
      comp = tmp_1 / var_10 + var_11 / var_12 * var_13 - (-0.0f * +1.6618E-41f);
      for (int i=0; i < var_3; ++i) {
        var_14[i] = +1.2734E-37f;
        comp = var_14[i] + logf(+0.0f);
        comp += (var_15 * var_16 * var_17);
        comp = (-1.4694E-36f * (+1.7018E3f + +1.7157E-36f - (var_18 / -1.8436E36f)));
      }
      if (comp <= (var_19 - +1.4128E34f * (-1.2866E35f - (var_20 - (+1.9375E35f + var_21))))) {
        comp += (var_22 * +1.8236E34f - var_23 * var_24 - var_25);
        float tmp_2 = +1.8835E-26f;
        comp = tmp_2 + (var_26 - -1.8132E-35f);
      }
      if (comp < sinf(+1.0853E-44f)) {
        float tmp_3 = var_27 * var_28;
        comp += tmp_3 / (-1.6435E36f - -1.6854E-44f);
      }
    }
  }
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array on the host and fill every slot with v.
 * Returns NULL if the allocation fails (the previous version would have
 * written through a NULL pointer). The caller owns the buffer (free()). */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  if (ret == NULL) {
    return NULL; /* propagate allocation failure instead of crashing */
  }
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
int main(int argc, char** argv) {
  /* The kernel consumes 29 command-line values; reading argv without this
   * guard was undefined behavior when fewer arguments were supplied. */
  if (argc < 30) {
    fprintf(stderr, "usage: %s <29 numeric arguments>\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float* tmp_15 = initPointer( atof(argv[15]) );
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  /* The kernel dereferences var_14 on the device, so mirror the host buffer
   * into device memory instead of handing the kernel the raw malloc'd host
   * pointer as the previous code did (illegal device address). */
  float* d_tmp_15 = NULL;
  if (tmp_15 == NULL ||
      hipMalloc((void**)&d_tmp_15, 10 * sizeof(float)) != hipSuccess) {
    fprintf(stderr, "allocation failed\n");
    free(tmp_15);
    return 1;
  }
  hipMemcpy(d_tmp_15, tmp_15, 10 * sizeof(float), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,d_tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
  hipDeviceSynchronize();
  /* Release both copies of the buffer (the original leaked the host one). */
  hipFree(d_tmp_15);
  free(tmp_15);
  return 0;
}
| 8c40e3e344f74f7ab125a85d668ab07fcc6a121f.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
if (comp <= -1.6060E36f / ldexpf(+1.5617E35f, 2)) {
comp += (+1.3779E36f - floorf(-1.2482E-43f));
comp = fmodf(-1.5149E36f / var_4, sinhf(-0.0f + (var_5 + var_6 * +1.0563E35f + (-1.6218E-43f + -1.5339E-35f))));
float tmp_1 = acosf(+1.3883E20f * atanf((+0.0f + (var_7 / var_8 / var_9))));
comp = tmp_1 / var_10 + var_11 / var_12 * var_13 - (-0.0f * +1.6618E-41f);
for (int i=0; i < var_3; ++i) {
var_14[i] = +1.2734E-37f;
comp = var_14[i] + logf(+0.0f);
comp += (var_15 * var_16 * var_17);
comp = (-1.4694E-36f * (+1.7018E3f + +1.7157E-36f - (var_18 / -1.8436E36f)));
}
if (comp <= (var_19 - +1.4128E34f * (-1.2866E35f - (var_20 - (+1.9375E35f + var_21))))) {
comp += (var_22 * +1.8236E34f - var_23 * var_24 - var_25);
float tmp_2 = +1.8835E-26f;
comp = tmp_2 + (var_26 - -1.8132E-35f);
}
if (comp < sinf(+1.0853E-44f)) {
float tmp_3 = var_27 * var_28;
comp += tmp_3 / (-1.6435E36f - -1.6854E-44f);
}
}
}
}
printf("%.17g\n", comp);
}
/* Build a host-side array of 10 floats, each initialized to v.
 * Ownership passes to the caller, which must release it with free(). */
float* initPointer(float v) {
  float* buffer = (float*) malloc(10 * sizeof(float));
  int slot = 0;
  while (slot < 10) {
    buffer[slot] = v;
    ++slot;
  }
  return buffer;
}
int main(int argc, char** argv) {
  /* The kernel consumes 29 command-line values; reading argv without this
   * guard was undefined behavior when fewer arguments were supplied. */
  if (argc < 30) {
    fprintf(stderr, "usage: %s <29 numeric arguments>\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float* tmp_15 = initPointer( atof(argv[15]) );
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  /* The kernel dereferences var_14 on the device, so mirror the host buffer
   * into device memory instead of handing the kernel the raw malloc'd host
   * pointer as the previous code did (illegal device address). */
  float* d_tmp_15 = NULL;
  if (tmp_15 == NULL ||
      cudaMalloc((void**)&d_tmp_15, 10 * sizeof(float)) != cudaSuccess) {
    fprintf(stderr, "allocation failed\n");
    free(tmp_15);
    return 1;
  }
  cudaMemcpy(d_tmp_15, tmp_15, 10 * sizeof(float), cudaMemcpyHostToDevice);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,d_tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
  cudaDeviceSynchronize();
  /* Release both copies of the buffer (the original leaked the host one). */
  cudaFree(d_tmp_15);
  free(tmp_15);
  return 0;
}
|
bdf57d74162ae6a25ee49571276b83a64b53884c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2021 Roberto Lopez Castro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../FX_m2.cu"
#include "store_and_transform_output_baseline.cuh"
#include "../outer_product.cuh"
#ifdef _noWALL_
typedef struct rusage resnfo;
typedef struct _timenfo {
double time;
double systime;
} timenfo;
#define timestamp(sample) getrusage(RUSAGE_SELF, (sample))
#define printtime(t) printf("%15f s (%f user + %f sys) ", \
t.time + t.systime, t.time, t.systime);
#else
typedef struct timeval resnfo;
typedef double timenfo;
#define timestamp(sample) gettimeofday((sample), 0)
#define printtime(t) printf("%15f s ", t);
#endif
#ifndef _WINOGRAD_
#define _WINOGRAD_
extern "C"
{
#define d(input, i, j) ( input[(i<<2) + (j)] )
// Winograd F(2x2,3x3) input transform (B^T d B) for one 4x4 tile held in
// Btd (row-major, accessed through the d() macro). The 16 transformed
// values are scattered into shared memory as 16 consecutive BN*BC planes.
__device__ __forceinline__ void load_and_transform_input_tile(float *Btd, float *pOutputs, int in_h, int in_w,
                                int tiles_dim, int in_c, int in_n, int tile_size,
                                int tiles_2d_dim, int tile_2d_s){
  float workspace[3];
  // Column pass of the transform, performed in place on Btd.
  #pragma unroll
  for(int j=0; j<4; j++){
    workspace[0] = Btd[j];
    workspace[1] = Btd[j+4];
    workspace[2] = Btd[j+8];
    Btd[j] = workspace[0] - workspace[2];
    Btd[j+4] = workspace[1] + workspace[2];
    Btd[j+8] = workspace[2] - workspace[1];
    Btd[j+12] = workspace[1] - Btd[j+12];
  }
  int c_offset = BN*BC;                        // one transformed-element plane
  int c_tensor = threadIdx.y*BN + threadIdx.x; // this thread's slot in a plane
  // Row pass fused with the shared-memory stores: each of the 16 outputs
  // lands in its own c_offset-sized plane.
  #pragma unroll
  for(int i=0; i<4; i++){ // prefetch 1 input tile/thread
    pOutputs[c_tensor+i*c_offset*4] = d(Btd, i, 0) - d(Btd, i, 2);
    pOutputs[c_tensor+i*c_offset*4+c_offset] = d(Btd, i, 1) + d(Btd, i, 2);
    pOutputs[c_tensor+i*c_offset*4+2*c_offset] = d(Btd, i, 2) - d(Btd, i, 1);
    pOutputs[c_tensor+i*c_offset*4+3*c_offset] = d(Btd, i, 1) - d(Btd, i, 3);
  }
}
// Copy the two (already transformed) 4x4 filter tiles this thread prefetched
// into registers out to shared memory, laid out as 16 planes of BK*BC floats.
__device__ __forceinline__ void load_filter_tile(float *tiles, float *pOutputs,
                    int filt_c, int filt_k){
  int c_tensor_s = threadIdx.y*BK + threadIdx.x; // slot within a filter plane
  int c_offset_s = BK*BC;                        // size of one filter plane
  for(int k=0; k<2; k++){ // prefetch 2 filter tiles/thread
    for(int i=0; i<4; i++){
      #pragma unroll
      for(int j=0; j<4; j++){
        pOutputs[c_tensor_s + i*c_offset_s*4 + j*c_offset_s] = tiles[k*16 + i*4 + j];
      }
    }
    // The thread's second tile belongs to the filter BN positions further on.
    c_tensor_s += BN;
  }
}
// Load two 4x4 transformed-filter tiles per thread from global memory into
// registers; the filter tensor is laid out with filt_k as the fastest
// transform-element stride.
__device__ __forceinline__ void prefetch_filter_tile(float *pInputs, float *tiles, int filt_k){
  int c_tensor = blockIdx.z*BK + (threadIdx.y*filt_k<<4) + threadIdx.x; // Iny*filt_k*4*4
  int acumm;
  #pragma unroll
  for(int i=0; i<4; i++){
    acumm = (i*filt_k<<2);
    #pragma unroll
    for(int j=0; j<4; j++){
      tiles[(i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor];
      // Second tile: the filter BN positions further along the K dimension.
      tiles[16 + (i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor+BN];
    }
  }
}
// Load one 4x4 halo'd input tile per thread into registers. `mask` has one
// bit per tile element; bits cleared by the caller mark positions that fall
// outside the image and are zero-filled instead of read.
__device__ __forceinline__ void prefetch_input_tile(float *pInputs, float *tile, int in_h, int in_w, int in_n, int tiles_dim, short mask){
  // Base offset of this thread's tile; the trailing "- (in_n*in_w+in_n)"
  // shifts the window up-left by one pixel for the 1-pixel halo.
  int c_tensor = (blockIdx.y%tiles_dim)*in_n*2 + (blockIdx.y/tiles_dim)*in_n*in_w*2 + blockIdx.x*BN + threadIdx.y*(in_n*in_h*in_w) + (threadIdx.x/in_n)*2*in_n + (threadIdx.x%in_n) - (in_n*in_w+in_n);
  int acumm,x;
  //short x1,x2;
  if(mask==0xFFFF){
    // Fast path: fully interior tile, no per-element checks.
    #pragma unroll
    for(int i=0; i<4; i++){
      acumm = i*in_n*in_w;
      #pragma unroll
      for(int j=0; j<4; j++){
        tile[(i<<2) + j] = pInputs[acumm + j*in_n + c_tensor];
      }
    }
  } else {
    // Border tile: consult one mask bit per element, zero-fill the rest.
    for(int i=0; i<4; i++){
      acumm = i*in_n*in_w;
      #pragma unroll
      for(int j=0; j<4; j++){
        x = (i<<2) + j;
        tile[x] = 0;
        if(mask&(1<<x))
          tile[x]=pInputs[acumm + j*in_n + c_tensor];
      }
    }
  }
}
// Read this lane's pair of filter fragments (two float4 values each from the
// current plane and the plane f_frag_offset away) from shared memory into
// registers.
__device__ __forceinline__ void prefetch_filter_frag(float4 *filter_frag, float4 *B_frag, int f_frag_offset, int offset1, int offset2){
  //float4 *B_start = (float4*) (B_frag);
  *((float4*) (filter_frag)) = *(B_frag + offset1);
  *((float4*) (filter_frag + 1)) = *(B_frag + offset2);
  *((float4*) (filter_frag + 2)) = *(B_frag + f_frag_offset + offset1);
  *((float4*) (filter_frag + 3)) = *(B_frag + f_frag_offset + offset2);
}
// Read this lane's pair of input fragments (two float4 values each from the
// current plane and the plane frag_offset away) from shared memory into
// registers.
__device__ __forceinline__ void prefetch_input_frag(float4* input_frag, float4 *A_frag, int frag_offset, int offset1, int offset2){
  //float4 *A_start = (float4*) (A_frag);
  *((float4*) (input_frag)) = *(A_frag + offset1); //ld_shared(A_frag + offset1);
  *((float4*) (input_frag + 1)) = *(A_frag + offset2);
  *((float4*) (input_frag + 2)) = *(A_frag + frag_offset + offset1);
  *((float4*) (input_frag + 3)) = *(A_frag + frag_offset + offset2); //3=2+1
}
// Fused Winograd F(2x2,3x3) convolution kernel.
// Launched as grid (in_n/BN, tiles_2d_dim, filt_k/BK), block (BN, 8), with
// dynamic shared memory holding 16*BC*BN floats of transformed input
// followed by 16*BC*BK floats of transformed filters (see the host wrapper).
__global__ void Winograd_kernel(float *A, float *B, float *C,
                    int tiles_dim, int in_c, int in_n, int in_h, int in_w,
                    int tile_size, int filt_k, int filt_c,
                    int tiles_2d_dim, int out_c, int out_n,
                    int tile_2d_s, int out_h, int out_w){
  extern __shared__ float shared_mem[];
  float *input_smem = (float*)shared_mem;
  float *filter_smem = (float*)&shared_mem[16*BC*BN];
  // Per-tile validity mask: clear the bits of halo rows/columns that fall
  // outside the image on each of the four borders.
  short m = 0xFFFF;
  if((blockIdx.y/tiles_dim)==0)   m&=0xFFF0;
  if((blockIdx.y/tiles_dim)==(tiles_dim-1)) m &= (!(in_w%2))?(0x0FFF):(0x00FF);
  if(!((blockIdx.y+1)%tiles_dim))  m &= (!(in_w%2))?(0x7777):(0x3333);
  if(!((blockIdx.y)%tiles_dim))  m&=0xeeee;
  float img_tile[16]; // Prefetch input from GMEM
  float filter_tile[32]; // Prefetch filter from GMEM
  float4 input_frag_mem[8];  //2*2(2*8/4) Data to do Outer Product + prefetch f. SMEM (double_buffer)
  float4 filter_frag_mem[8]; //2*2 Data to do Outer Product + prefetch f. SMEM (double_buffer)
  float4 accumulator[2][16] = {0.0f};  // Accumulators

  float4 *A_frag; // Input data pointer
  int frag_offset = 2* (BC*BN); // (2=8/4) SMEM input read offset

  float4 *B_frag; // Filter data pointer
  int f_frag_offset = 2* (BC*BK); // (2=8/4) SMEM filter read offset

  float4 *input_frag = (float4*) input_frag_mem;
  float4 *filter_frag = (float4*) filter_frag_mem;

  float4 *swap;

  // Prefetch the first channel-slice while registers are free.
  prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
  prefetch_filter_tile(B, filter_tile, filt_k);

  float4 *input_frag_buffer = (float4*) (input_frag+4);
  float4 *filter_frag_buffer = (float4*) (filter_frag+4);

  // Mainloop - iterates over the entire K dimension - not unrolled
  for(int iter=0; iter<in_c; iter+=BC){ // Current iteration
    A_frag = (float4*) (input_smem + threadIdx.y*BC*BN);
    B_frag = (float4*) (filter_smem + threadIdx.y*BC*BK);

    // Stage this slice's transformed input and filters into shared memory.
    load_and_transform_input_tile(img_tile, input_smem, in_h, in_w,
                    tiles_dim, in_c, in_n, tile_size,
                    tiles_2d_dim, tile_2d_s);
    load_filter_tile(filter_tile, filter_smem, filt_c, filt_k);

    __syncthreads();

    prefetch_input_frag(input_frag, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
    prefetch_filter_frag(filter_frag, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);

    #pragma unroll
    for(int i=0; i<BC; i++){
      // Software pipeline: fetch fragment i+1 into the spare buffer while
      // the outer product consumes fragment i, then swap buffers.
      if(i<(BC-1)){
        A_frag += BN/4;
        B_frag += BK/4;

        prefetch_input_frag(input_frag_buffer, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
        prefetch_filter_frag(filter_frag_buffer, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);
      }

      outer_product(input_frag, filter_frag, accumulator);

      swap = input_frag;
      input_frag = input_frag_buffer;
      input_frag_buffer = swap;

      swap = filter_frag;
      filter_frag = filter_frag_buffer;
      filter_frag_buffer = swap;
    }

    // Advance the global pointers to the next BC-channel slice.
    A += in_n*BC*in_w*in_h;
    B += filt_k*BC*4*4;

    if(iter<(in_c-BC)){
      prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
      prefetch_filter_tile(B, filter_tile, filt_k);
    }

    __syncthreads();
  }

  // Transpose, transform and store accumulated result
  store_output_tile(accumulator, shared_mem, C, out_h, out_w, tiles_dim, out_n, input_frag_mem, filter_frag_mem, out_thread, access_s_out, m);
}
// Host wrapper: transform the filters with FX, then launch the fused
// Winograd convolution. Returns the last HIP error for caller-side checks.
hipError_t convolutionForward_32x64x8(float *k, int in_h, int in_w, float *w, int out_h,
                  int out_w, int out_n, int out_c, float *C, float *Ww,
                  const unsigned int n,
                  int tiles_dim, int in_n, int tile_size,
                  int in_c, int filt_k, int filt_c, int filt_h, int filt_w, int alpha, int m){
  int tile_2d_s = tile_size*tile_size;
  int tiles_2d_dim = tiles_dim*tiles_dim;
  // Shared-memory footprint in floats; (smem_size)<<2 below converts to bytes.
  int smem_size = 16*BC*BN + 16*BC*BK;

  hipLaunchKernelGGL(( FX), dim3(dim3(filt_k/BK, filt_c/BC)), dim3(dim3(BN, BC)), 0, 0, w, Ww, filt_k, filt_c, filt_h, filt_w, alpha);

  // BUGFIX: the hipify conversion had inserted a stray `0` between the
  // stream argument and the first kernel argument, shifting every parameter
  // of Winograd_kernel by one. hipLaunchKernelGGL takes exactly
  // (kernel, grid, block, sharedMemBytes, stream, args...).
  hipLaunchKernelGGL(( Winograd_kernel), dim3(dim3(in_n/BN, tiles_2d_dim, filt_k/BK)), dim3(dim3(BN, 8)), (smem_size)<<2, 0, k, Ww, C,
          tiles_dim, in_c, in_n, in_h, in_w, tile_size,
          filt_k, filt_c, tiles_2d_dim, out_c, out_n, tile_2d_s,
          out_h, out_w);

  return hipGetLastError();
}
}
#endif
| bdf57d74162ae6a25ee49571276b83a64b53884c.cu | // Copyright 2021 Roberto Lopez Castro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../FX_m2.cu"
#include "store_and_transform_output_baseline.cuh"
#include "../outer_product.cuh"
#ifdef _noWALL_
typedef struct rusage resnfo;
typedef struct _timenfo {
double time;
double systime;
} timenfo;
#define timestamp(sample) getrusage(RUSAGE_SELF, (sample))
#define printtime(t) printf("%15f s (%f user + %f sys) ", \
t.time + t.systime, t.time, t.systime);
#else
typedef struct timeval resnfo;
typedef double timenfo;
#define timestamp(sample) gettimeofday((sample), 0)
#define printtime(t) printf("%15f s ", t);
#endif
#ifndef _WINOGRAD_
#define _WINOGRAD_
extern "C"
{
#define d(input, i, j) ( input[(i<<2) + (j)] )
// Winograd F(2x2,3x3) input transform (B^T d B) for one 4x4 tile held in
// Btd (row-major, accessed through the d() macro). The 16 transformed
// values are scattered into shared memory as 16 consecutive BN*BC planes.
__device__ __forceinline__ void load_and_transform_input_tile(float *Btd, float *pOutputs, int in_h, int in_w,
                                int tiles_dim, int in_c, int in_n, int tile_size,
                                int tiles_2d_dim, int tile_2d_s){
  float workspace[3];
  // Column pass of the transform, performed in place on Btd.
  #pragma unroll
  for(int j=0; j<4; j++){
    workspace[0] = Btd[j];
    workspace[1] = Btd[j+4];
    workspace[2] = Btd[j+8];
    Btd[j] = workspace[0] - workspace[2];
    Btd[j+4] = workspace[1] + workspace[2];
    Btd[j+8] = workspace[2] - workspace[1];
    Btd[j+12] = workspace[1] - Btd[j+12];
  }
  int c_offset = BN*BC;                        // one transformed-element plane
  int c_tensor = threadIdx.y*BN + threadIdx.x; // this thread's slot in a plane
  // Row pass fused with the shared-memory stores: each of the 16 outputs
  // lands in its own c_offset-sized plane.
  #pragma unroll
  for(int i=0; i<4; i++){ // prefetch 1 input tile/thread
    pOutputs[c_tensor+i*c_offset*4] = d(Btd, i, 0) - d(Btd, i, 2);
    pOutputs[c_tensor+i*c_offset*4+c_offset] = d(Btd, i, 1) + d(Btd, i, 2);
    pOutputs[c_tensor+i*c_offset*4+2*c_offset] = d(Btd, i, 2) - d(Btd, i, 1);
    pOutputs[c_tensor+i*c_offset*4+3*c_offset] = d(Btd, i, 1) - d(Btd, i, 3);
  }
}
// Copy the two (already transformed) 4x4 filter tiles this thread prefetched
// into registers out to shared memory, laid out as 16 planes of BK*BC floats.
__device__ __forceinline__ void load_filter_tile(float *tiles, float *pOutputs,
                    int filt_c, int filt_k){
  int c_tensor_s = threadIdx.y*BK + threadIdx.x; // slot within a filter plane
  int c_offset_s = BK*BC;                        // size of one filter plane
  for(int k=0; k<2; k++){ // prefetch 2 filter tiles/thread
    for(int i=0; i<4; i++){
      #pragma unroll
      for(int j=0; j<4; j++){
        pOutputs[c_tensor_s + i*c_offset_s*4 + j*c_offset_s] = tiles[k*16 + i*4 + j];
      }
    }
    // The thread's second tile belongs to the filter BN positions further on.
    c_tensor_s += BN;
  }
}
// Load two 4x4 transformed-filter tiles per thread from global memory into
// registers; the filter tensor is laid out with filt_k as the fastest
// transform-element stride.
__device__ __forceinline__ void prefetch_filter_tile(float *pInputs, float *tiles, int filt_k){
  int c_tensor = blockIdx.z*BK + (threadIdx.y*filt_k<<4) + threadIdx.x; // Iny*filt_k*4*4
  int acumm;
  #pragma unroll
  for(int i=0; i<4; i++){
    acumm = (i*filt_k<<2);
    #pragma unroll
    for(int j=0; j<4; j++){
      tiles[(i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor];
      // Second tile: the filter BN positions further along the K dimension.
      tiles[16 + (i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor+BN];
    }
  }
}
// Load one 4x4 halo'd input tile per thread into registers. `mask` has one
// bit per tile element; bits cleared by the caller mark positions that fall
// outside the image and are zero-filled instead of read.
__device__ __forceinline__ void prefetch_input_tile(float *pInputs, float *tile, int in_h, int in_w, int in_n, int tiles_dim, short mask){
  // Base offset of this thread's tile; the trailing "- (in_n*in_w+in_n)"
  // shifts the window up-left by one pixel for the 1-pixel halo.
  int c_tensor = (blockIdx.y%tiles_dim)*in_n*2 + (blockIdx.y/tiles_dim)*in_n*in_w*2 + blockIdx.x*BN + threadIdx.y*(in_n*in_h*in_w) + (threadIdx.x/in_n)*2*in_n + (threadIdx.x%in_n) - (in_n*in_w+in_n);
  int acumm,x;
  //short x1,x2;
  if(mask==0xFFFF){
    // Fast path: fully interior tile, no per-element checks.
    #pragma unroll
    for(int i=0; i<4; i++){
      acumm = i*in_n*in_w;
      #pragma unroll
      for(int j=0; j<4; j++){
        tile[(i<<2) + j] = pInputs[acumm + j*in_n + c_tensor];
      }
    }
  } else {
    // Border tile: consult one mask bit per element, zero-fill the rest.
    for(int i=0; i<4; i++){
      acumm = i*in_n*in_w;
      #pragma unroll
      for(int j=0; j<4; j++){
        x = (i<<2) + j;
        tile[x] = 0;
        if(mask&(1<<x))
          tile[x]=pInputs[acumm + j*in_n + c_tensor];
      }
    }
  }
}
// Read this lane's pair of filter fragments (two float4 values each from the
// current plane and the plane f_frag_offset away) from shared memory into
// registers.
__device__ __forceinline__ void prefetch_filter_frag(float4 *filter_frag, float4 *B_frag, int f_frag_offset, int offset1, int offset2){
  //float4 *B_start = (float4*) (B_frag);
  *((float4*) (filter_frag)) = *(B_frag + offset1);
  *((float4*) (filter_frag + 1)) = *(B_frag + offset2);
  *((float4*) (filter_frag + 2)) = *(B_frag + f_frag_offset + offset1);
  *((float4*) (filter_frag + 3)) = *(B_frag + f_frag_offset + offset2);
}
// Read this lane's pair of input fragments (two float4 values each from the
// current plane and the plane frag_offset away) from shared memory into
// registers.
__device__ __forceinline__ void prefetch_input_frag(float4* input_frag, float4 *A_frag, int frag_offset, int offset1, int offset2){
  //float4 *A_start = (float4*) (A_frag);
  *((float4*) (input_frag)) = *(A_frag + offset1); //ld_shared(A_frag + offset1);
  *((float4*) (input_frag + 1)) = *(A_frag + offset2);
  *((float4*) (input_frag + 2)) = *(A_frag + frag_offset + offset1);
  *((float4*) (input_frag + 3)) = *(A_frag + frag_offset + offset2); //3=2+1
}
// Fused Winograd F(2x2,3x3) convolution kernel.
// Launched as grid (in_n/BN, tiles_2d_dim, filt_k/BK), block (BN, 8), with
// dynamic shared memory holding 16*BC*BN floats of transformed input
// followed by 16*BC*BK floats of transformed filters (see the host wrapper).
__global__ void Winograd_kernel(float *A, float *B, float *C,
                    int tiles_dim, int in_c, int in_n, int in_h, int in_w,
                    int tile_size, int filt_k, int filt_c,
                    int tiles_2d_dim, int out_c, int out_n,
                    int tile_2d_s, int out_h, int out_w){
  extern __shared__ float shared_mem[];
  float *input_smem = (float*)shared_mem;
  float *filter_smem = (float*)&shared_mem[16*BC*BN];
  // Per-tile validity mask: clear the bits of halo rows/columns that fall
  // outside the image on each of the four borders.
  short m = 0xFFFF;
  if((blockIdx.y/tiles_dim)==0)   m&=0xFFF0;
  if((blockIdx.y/tiles_dim)==(tiles_dim-1)) m &= (!(in_w%2))?(0x0FFF):(0x00FF);
  if(!((blockIdx.y+1)%tiles_dim))  m &= (!(in_w%2))?(0x7777):(0x3333);
  if(!((blockIdx.y)%tiles_dim))  m&=0xeeee;
  float img_tile[16]; // Prefetch input from GMEM
  float filter_tile[32]; // Prefetch filter from GMEM
  float4 input_frag_mem[8];  //2*2(2*8/4) Data to do Outer Product + prefetch f. SMEM (double_buffer)
  float4 filter_frag_mem[8]; //2*2 Data to do Outer Product + prefetch f. SMEM (double_buffer)
  float4 accumulator[2][16] = {0.0f};  // Accumulators

  float4 *A_frag; // Input data pointer
  int frag_offset = 2* (BC*BN); // (2=8/4) SMEM input read offset

  float4 *B_frag; // Filter data pointer
  int f_frag_offset = 2* (BC*BK); // (2=8/4) SMEM filter read offset

  float4 *input_frag = (float4*) input_frag_mem;
  float4 *filter_frag = (float4*) filter_frag_mem;

  float4 *swap;

  // Prefetch the first channel-slice while registers are free.
  prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
  prefetch_filter_tile(B, filter_tile, filt_k);

  float4 *input_frag_buffer = (float4*) (input_frag+4);
  float4 *filter_frag_buffer = (float4*) (filter_frag+4);

  // Mainloop - iterates over the entire K dimension - not unrolled
  for(int iter=0; iter<in_c; iter+=BC){ // Current iteration
    A_frag = (float4*) (input_smem + threadIdx.y*BC*BN);
    B_frag = (float4*) (filter_smem + threadIdx.y*BC*BK);

    // Stage this slice's transformed input and filters into shared memory.
    load_and_transform_input_tile(img_tile, input_smem, in_h, in_w,
                    tiles_dim, in_c, in_n, tile_size,
                    tiles_2d_dim, tile_2d_s);
    load_filter_tile(filter_tile, filter_smem, filt_c, filt_k);

    __syncthreads();

    prefetch_input_frag(input_frag, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
    prefetch_filter_frag(filter_frag, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);

    #pragma unroll
    for(int i=0; i<BC; i++){
      // Software pipeline: fetch fragment i+1 into the spare buffer while
      // the outer product consumes fragment i, then swap buffers.
      if(i<(BC-1)){
        A_frag += BN/4;
        B_frag += BK/4;

        prefetch_input_frag(input_frag_buffer, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
        prefetch_filter_frag(filter_frag_buffer, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);
      }

      outer_product(input_frag, filter_frag, accumulator);

      swap = input_frag;
      input_frag = input_frag_buffer;
      input_frag_buffer = swap;

      swap = filter_frag;
      filter_frag = filter_frag_buffer;
      filter_frag_buffer = swap;
    }

    // Advance the global pointers to the next BC-channel slice.
    A += in_n*BC*in_w*in_h;
    B += filt_k*BC*4*4;

    if(iter<(in_c-BC)){
      prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
      prefetch_filter_tile(B, filter_tile, filt_k);
    }

    __syncthreads();
  }

  // Transpose, transform and store accumulated result
  store_output_tile(accumulator, shared_mem, C, out_h, out_w, tiles_dim, out_n, input_frag_mem, filter_frag_mem, out_thread, access_s_out, m);
}
// Host wrapper: transform the filters with FX, then launch the fused
// Winograd convolution. Returns the last CUDA error for caller-side checks.
cudaError_t convolutionForward_32x64x8(float *k, int in_h, int in_w, float *w, int out_h,
                  int out_w, int out_n, int out_c, float *C, float *Ww,
                  const unsigned int n,
                  int tiles_dim, int in_n, int tile_size,
                  int in_c, int filt_k, int filt_c, int filt_h, int filt_w, int alpha, int m){
  int tile_2d_s = tile_size*tile_size;
  int tiles_2d_dim = tiles_dim*tiles_dim;
  // Shared-memory footprint in floats; (smem_size)<<2 below converts to bytes.
  int smem_size = 16*BC*BN + 16*BC*BK;

  FX<<<dim3(filt_k/BK, filt_c/BC), dim3(BN, BC)>>>(w, Ww, filt_k, filt_c, filt_h, filt_w, alpha);

  Winograd_kernel<<<dim3(in_n/BN, tiles_2d_dim, filt_k/BK), dim3(BN, 8), (smem_size)<<2 >>>(k, Ww, C,
          tiles_dim, in_c, in_n, in_h, in_w, tile_size,
          filt_k, filt_c, tiles_2d_dim, out_c, out_n, tile_2d_s,
          out_h, out_w);

  return cudaGetLastError();
}
}
#endif
|
482528b3532776ab230533543fe02d105134d225.hip | // !!! This is a file automatically generated by hipify!!!
//#############################################################################
// File: SLOptixRaytracerShading.cu
// Purpose: CUDA Shader file used in Optix Tracing
// Author: Nic Dorner
// Date: October 2019
// Copyright: Nic Dorner
// This software is provide under the GNU General Public License
// Please visit: http://opensource.org/licenses/GPL-3.0
//#############################################################################
#include <SLOptixHelper.h>
#include <SLOptixDefinitions.h>
#include <hip/hip_runtime_api.h>
//-----------------------------------------------------------------------------
extern "C" {
__constant__ ortParams params;
}
//-----------------------------------------------------------------------------
// Miss program for radiance rays: paint the per-ray payload with the
// background color stored in this miss record's SBT data.
extern "C" __global__ void __miss__radiance()
{
    auto* rt_data = reinterpret_cast<ortMissData*>(optixGetSbtDataPointer());
    setColor(rt_data->bg_color);
}
//-----------------------------------------------------------------------------
// Miss program for occlusion (shadow) rays: intentionally empty — the
// payload's lighted factor keeps whatever the any-hit programs left in it.
extern "C" __global__ void __miss__occlusion()
{
}
// Any-hit program for radiance rays: intentionally a no-op (no alpha test).
extern "C" __global__ void __anyhit__radiance()
{
}
//-----------------------------------------------------------------------------
// Any-hit program for occlusion rays: attenuate the payload's lighted factor
// by this surface's opacity (1 - kt), then ignore the intersection so the
// ray keeps traversing and further semi-transparent occluders can stack.
extern "C" __global__ void __anyhit__occlusion()
{
    auto* rt_data = reinterpret_cast<ortHitData*>(optixGetSbtDataPointer());
    setLighted(getLighted() - (1.0f - rt_data->material.kt));
    optixIgnoreIntersection();
}
//-----------------------------------------------------------------------------
// Closest-hit program for radiance rays: Blinn local illumination with soft
// shadows estimated by sampling a disc around each light, plus recursive
// reflection and refraction rays while getDepth() < params.max_depth.
// (Cleanup: removed the unused launch-index locals, a stray empty statement,
// and the commented-out Phong-specular dead code.)
extern "C" __global__ void __closesthit__radiance()
{
    // Get all data for the hit point
    auto* rt_data = reinterpret_cast<ortHitData*>(optixGetSbtDataPointer());
    const float3 ray_dir = optixGetWorldRayDirection();

    // calculate normal vector
    float3 N = getNormalVector();

    // calculate texture color
    float4 texture_color = getTextureColor();

    // calculate hit point
    const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir;

    // initialize color
    float4 color = make_float4(0.0f);

    {
        float4 local_color = make_float4(0.0f);
        float4 specular_color = make_float4(0.0f);

        // Add emissive and ambient to current color
        local_color += rt_data->material.emissive_color;
        local_color += rt_data->material.ambient_color * params.globalAmbientColor;

        // calculate local illumination for every light source
        for (int i = 0; i < params.numLights; i++)
        {
            const ortLight light = params.lights[i];
            const float   Ldist = length(light.position - P);
            const float3  L     = normalize(light.position - P);
            const float   LdotN = dot(L, N);

            // Blinn specular reflection: half vector between light & eye
            const float3 H = normalize(L - ray_dir);

            if (LdotN > 0.0f)
            {
                // Estimate the visible fraction of the light by shooting
                // shadow rays at concentric samples on a disc around it.
                float  lighted    = 0.0f;
                float3 lightDiscX = normalize(make_float3(1, 1, (-1 * (L.x + L.y) / L.z)));
                float3 lightDiscY = cross(L, lightDiscX);

                for (unsigned int r = 1; r <= light.samples.samplesX; r++)
                {
                    for (unsigned int q = 1; q <= light.samples.samplesY; q++)
                    {
                        const float  phi = (2.0f / light.samples.samplesY) * q;
                        const float3 discPoint = light.position +
                                                 (normalize(lightDiscX) * cospif(phi) * (light.radius / light.samples.samplesX) * r) +
                                                 (normalize(lightDiscY) * sinpif(phi) * (light.radius / light.samples.samplesX) * r);
                        const float3 direction = normalize(discPoint - P);
                        const float  discDist  = length(discPoint - P);
                        const float  sampleLighted = traceShadowRay(params.handle,
                                                                    P,
                                                                    direction,
                                                                    discDist);
                        if (sampleLighted > 0)
                        {
                            // Average the sample contributions.
                            lighted += sampleLighted / (light.samples.samplesX * light.samples.samplesY);
                        }
                    }
                }

                // Phong shading
                if (lighted > 0.0f)
                {
                    // calculate spot effect if light is a spotlight
                    float spotEffect = 1.0f;
                    if (light.spotCutOffDEG < 180.0f)
                    {
                        float LdS = max(dot(-L, light.spotDirWS), 0.0f);

                        // check if point is in spot cone
                        if (LdS > light.spotCosCut)
                        {
                            spotEffect = powf(LdS, light.spotExponent);
                        }
                        else
                        {
                            lighted    = 0.0f;
                            spotEffect = 0.0f;
                        }
                    }

                    local_color +=
                      (rt_data->material.diffuse_color * max(LdotN, 0.0f)) // diffuse
                      * lighted                                            // lighted
                      * light.diffuse_color                                // multiply with diffuse light color
                      * lightAttenuation(light, Ldist)                     // multiply with light attenuation
                      * spotEffect;
                    specular_color +=
                      (rt_data->material.specular_color * powf(max(dot(N, H), 0.0), rt_data->material.shininess)) // specular
                      * lighted                                            // lighted
                      * light.specular_color                               // multiply with specular light color
                      * lightAttenuation(light, Ldist)                     // multiply with light attenuation
                      * spotEffect;
                }
            }

            local_color += rt_data->material.ambient_color * lightAttenuation(light, Ldist) * light.ambient_color;
        }

        // multiply local color with texture color and add specular color afterwards
        color += (local_color * texture_color) + specular_color;
    }

    // Send reflection ray
    if (getDepth() < params.max_depth && rt_data->material.kr > 0.0f)
    {
        color += (traceReflectionRay(params.handle,
                                     P,
                                     N,
                                     ray_dir) *
                  rt_data->material.kr);
    }

    // Send refraction ray
    if (getDepth() < params.max_depth && rt_data->material.kt > 0.0f)
    {
        color += (traceRefractionRay(params.handle,
                                     P,
                                     N,
                                     ray_dir,
                                     rt_data->material.kn) *
                  rt_data->material.kt);
    }

    // Set color to payload
    setColor(color);
}
//----------------------------------------------------------------------------- | 482528b3532776ab230533543fe02d105134d225.cu | //#############################################################################
// File: SLOptixRaytracerShading.cu
// Purpose: CUDA Shader file used in Optix Tracing
// Author: Nic Dorner
// Date: October 2019
// Copyright: Nic Dorner
// This software is provide under the GNU General Public License
// Please visit: http://opensource.org/licenses/GPL-3.0
//#############################################################################
#include <SLOptixHelper.h>
#include <SLOptixDefinitions.h>
#include <cuda_runtime_api.h>
//-----------------------------------------------------------------------------
extern "C" {
__constant__ ortParams params;
}
//-----------------------------------------------------------------------------
// Miss program for radiance rays: no geometry was hit along the ray, so
// the payload color is simply set to the background color stored in the
// miss record of the shader binding table.
extern "C" __global__ void __miss__radiance()
{
    ortMissData* miss_data = reinterpret_cast<ortMissData*>(optixGetSbtDataPointer());
    setColor(miss_data->bg_color);
}
//-----------------------------------------------------------------------------
// Miss program for occlusion (shadow) rays. Intentionally empty: when the
// shadow ray reaches the light unobstructed, the payload keeps whatever
// "lighted" value the caller initialised it with.
extern "C" __global__ void __miss__occlusion()
{
}
// Any-hit program for radiance rays. Intentionally empty: all radiance
// shading is performed in the closest-hit program.
extern "C" __global__ void __anyhit__radiance()
{
}
//-----------------------------------------------------------------------------
// Any-hit program for occlusion (shadow) rays.
// Every surface the shadow ray passes through attenuates the payload's
// "lighted" value by that surface's opacity (1 - kt); a fully opaque hit
// (kt == 0) removes the light contribution entirely.
extern "C" __global__ void __anyhit__occlusion()
{
    auto* rt_data = reinterpret_cast<ortHitData*>(optixGetSbtDataPointer());
    setLighted(getLighted() - (1.0f - rt_data->material.kt));
    // Ignore the intersection so traversal continues and further occluders
    // along the same ray can attenuate the payload as well.
    optixIgnoreIntersection();
}
//-----------------------------------------------------------------------------
// Closest-hit program for radiance rays.
//
// Computes Blinn-Phong shading at the hit point:
//   * emissive + global and per-light ambient terms,
//   * per-light diffuse and specular terms, attenuated by distance,
//     spotlight cone and a soft-shadow factor,
//   * soft shadows estimated by tracing shadow rays towards sample points
//     on a disc oriented perpendicular to the light direction,
//   * recursive reflection/refraction rays while the payload depth is
//     below params.max_depth.
// The resulting color is written to the ray payload via setColor().
extern "C" __global__ void __closesthit__radiance()
{
    // Get all data for the hit point
    // (removed unused launch-index/dimension queries from the original)
    auto* rt_data = reinterpret_cast<ortHitData*>(optixGetSbtDataPointer());
    const float3 ray_dir = optixGetWorldRayDirection();
    // Shading normal, texture color and world-space hit position
    float3 N = getNormalVector();
    float4 texture_color = getTextureColor();
    const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir;
    // initialize color
    float4 color = make_float4(0.0f);
    {
        float4 local_color = make_float4(0.0f);
        float4 specular_color = make_float4(0.0f);
        // Add emissive and (global) ambient to current color
        local_color += rt_data->material.emissive_color;
        local_color += rt_data->material.ambient_color * params.globalAmbientColor;
        // calculate local illumination for every light source
        for (int i = 0; i < params.numLights; i++)
        {
            const ortLight light = params.lights[i];
            const float Ldist = length(light.position - P);
            const float3 L = normalize(light.position - P);
            const float LdotN = dot(L, N);
            // Blinn specular reflection: half vector between light & eye
            // (Phong would use R = reflect(-L, N) against the view vector)
            const float3 H = normalize(L - ray_dir);
            if (LdotN > 0.0f)
            {
                // Soft-shadow estimation: trace shadow rays towards sample
                // points on concentric rings of a disc around the light.
                float lighted = 0.0f;
                float3 lightDiscX = normalize(make_float3(1, 1, (-1 * (L.x + L.y) / L.z)));
                float3 lightDiscY = cross(L, lightDiscX);
                for (unsigned int r = 1; r <= light.samples.samplesX; r++)
                {
                    for (unsigned int q = 1; q <= light.samples.samplesY; q++)
                    {
                        const float phi = (2.0f / light.samples.samplesY) * q;
                        const float3 discPoint = light.position +
                                                 (normalize(lightDiscX) * cospif(phi) * (light.radius / light.samples.samplesX) * r) +
                                                 (normalize(lightDiscY) * sinpif(phi) * (light.radius / light.samples.samplesX) * r);
                        const float3 direction = normalize(discPoint - P);
                        const float discDist = length(discPoint - P);
                        const float sampleLighted = traceShadowRay(params.handle,
                                                                   P,
                                                                   direction,
                                                                   discDist);
                        if (sampleLighted > 0)
                        {
                            // Average the visibility over all samples
                            lighted += sampleLighted / (light.samples.samplesX * light.samples.samplesY);
                        }
                    }
                }
                // Phong shading (only when at least partially lit)
                if (lighted > 0.0f)
                {
                    // calculate spot effect if light is a spotlight
                    // (removed a stray empty statement from the original)
                    float spotEffect = 1.0f;
                    if (light.spotCutOffDEG < 180.0f)
                    {
                        float LdS = max(dot(-L, light.spotDirWS), 0.0f);
                        // check if point is in spot cone
                        if (LdS > light.spotCosCut)
                        {
                            spotEffect = powf(LdS, light.spotExponent);
                        }
                        else
                        {
                            lighted = 0.0f;
                            spotEffect = 0.0f;
                        }
                    }
                    local_color +=
                        (rt_data->material.diffuse_color * max(LdotN, 0.0f)) // diffuse
                        * lighted                                            // lighted
                        * light.diffuse_color                                // multiply with diffuse light color
                        * lightAttenuation(light, Ldist)                     // multiply with light attenuation
                        * spotEffect;
                    // 0.0f literal keeps max/powf in single precision (the
                    // original used a double 0.0 and promoted the call).
                    specular_color +=
                        (rt_data->material.specular_color * powf(max(dot(N, H), 0.0f), rt_data->material.shininess)) // specular
                        * lighted                                            // lighted
                        * light.specular_color                               // multiply with specular light color
                        * lightAttenuation(light, Ldist)                     // multiply with light attenuation
                        * spotEffect;
                }
            }
            // Per-light ambient contribution (applied even when shadowed)
            local_color += rt_data->material.ambient_color * lightAttenuation(light, Ldist) * light.ambient_color;
        }
        // multiply local color with texture color and add specular color afterwards
        color += (local_color * texture_color) + specular_color;
    }
    // Send reflection ray
    if (getDepth() < params.max_depth && rt_data->material.kr > 0.0f)
    {
        color += (traceReflectionRay(params.handle,
                                     P,
                                     N,
                                     ray_dir) *
                  rt_data->material.kr);
    }
    // Send refraction ray
    if (getDepth() < params.max_depth && rt_data->material.kt > 0.0f)
    {
        color += (traceRefractionRay(params.handle,
                                     P,
                                     N,
                                     ray_dir,
                                     rt_data->material.kn) *
                  rt_data->material.kt);
    }
    // Set color to payload
    setColor(color);
}
//----------------------------------------------------------------------------- |
b3bc69ab2087e7fe90db0c2436b63d4c364d5d92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Builds one 3x3 rotation matrix per (image, insertion) pair from four
// random values in dev_ramR and stores it in dev_mat (9 doubles per thread).
// Launch contract: one block per image; dev_nc[blockIdx.x] gives the number
// of active insertions; dynamic shared memory of blockDim.x * 18 doubles.
__global__ void kernel_getRandomR(double* dev_mat, double* dev_ramR, int* dev_nc)
{
    // blockIdx.x selects the image, threadIdx.x the insertion within it;
    // threads past dev_nc[blockIdx.x] have nothing to do.
    if (threadIdx.x < dev_nc[blockIdx.x])
    {
        const int gid = threadIdx.x + blockIdx.x * blockDim.x;
        // Per-thread scratch: 9 doubles for the antisymmetric matrix,
        // 9 doubles for its square.
        extern __shared__ double matS[];
        double* skew = matS + threadIdx.x * 18;
        double* sq = skew + 9;
        // Fill the antisymmetric matrix from the last three random values.
        skew[0] = 0;
        skew[4] = 0;
        skew[8] = 0;
        skew[5] = dev_ramR[gid * 4 + 1];
        skew[6] = dev_ramR[gid * 4 + 2];
        skew[1] = dev_ramR[gid * 4 + 3];
        skew[7] = -skew[5];
        skew[2] = -skew[6];
        skew[3] = -skew[1];
        // sq = skew * skew (3x3 multiply, column-major indexing).
        for (int e = 0; e < 9; e++)
            sq[e] = 0;
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < 3; j++)
                for (int k = 0; k < 3; k++)
                    sq[i + j * 3] += skew[i + k * 3] * skew[k + j * 3];
        // Combine: R = I + 2*w*skew + 2*skew^2 with w = dev_ramR[gid*4]
        // (presumably a quaternion-to-rotation conversion -- TODO confirm
        // against the host-side generator of dev_ramR).
        const double twoW = 2 * dev_ramR[gid * 4];
        for (int e = 0; e < 9; e++)
        {
            skew[e] *= twoW;
            skew[e] += sq[e] * 2;
        }
        skew[0] += 1;
        skew[4] += 1;
        skew[8] += 1;
        // Copy the finished matrix out to global memory.
        for (int e = 0; e < 9; e++)
            dev_mat[gid * 9 + e] = skew[e];
    }
} | b3bc69ab2087e7fe90db0c2436b63d4c364d5d92.cu | #include "includes.h"
// Builds one 3x3 rotation matrix per (image, insertion) pair from four
// random values in dev_ramR and writes it to dev_mat (9 doubles per thread).
// Launch contract: one block per image; dev_nc[blockIdx.x] active threads;
// dynamic shared memory of blockDim.x * 18 * sizeof(double).
__global__ void kernel_getRandomR(double* dev_mat, double* dev_ramR, int* dev_nc)
{
    // blockIdx.x -> index of each image
    // threadIdx.x -> index of each insertation of each image
    if (threadIdx.x < dev_nc[blockIdx.x]) // true if this image should be inserted
    {
        int tid = threadIdx.x + blockIdx.x * blockDim.x;
        // Per-thread scratch in dynamic shared memory: 9 doubles for the
        // antisymmetric matrix `mat`, 9 for its square `res`.
        extern __shared__ double matS[];
        double *mat, *res;
        mat = matS + threadIdx.x * 18;
        res = mat + 9;
        // Fill the antisymmetric matrix from the last three random values.
        mat[0] = 0; mat[4] = 0; mat[8] = 0;
        mat[5] = dev_ramR[tid * 4 + 1];
        mat[6] = dev_ramR[tid * 4 + 2];
        mat[1] = dev_ramR[tid * 4 + 3];
        mat[7] = -mat[5];
        mat[2] = -mat[6];
        mat[3] = -mat[1];
        // res = mat * mat (3x3 multiply, column-major indexing).
        for(int i = 0; i < 9; i++)
            res[i] = 0;
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < 3; j++)
                for (int k = 0; k < 3; k++)
                    res[i + j * 3] += mat[i + k * 3] * mat[k + j * 3];
        // R = I + 2*w*mat + 2*mat^2 with w = dev_ramR[tid*4] -- presumably
        // a quaternion-to-rotation conversion; TODO confirm with host code.
        double scale = 2 * dev_ramR[tid * 4];
        for (int n = 0; n < 9; n++)
        {
            mat[n] *= scale;
            mat[n] += res[n] * 2;
        }
        mat[0] += 1;
        mat[4] += 1;
        mat[8] += 1;
        // Write the finished matrix to global memory.
        for (int n = 0; n < 9; n++)
        {
            dev_mat[tid * 9 + n] = mat[n];
        }
    }
} |
345d9a3b8dc6de1226052add2b55d66295222d44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from ztrtri_diag.cu normal z -> d, Wed Sep 17 15:08:23 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "common_magma.h"
#include "dtrtri.h"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Same as dtrtri_diag, but adds queue argument.
@ingroup magma_dblas3
********************************************************************/
/**
Purpose
-------
dtrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA DOUBLE_PRECISION array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA DOUBLE_PRECISION array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dtrtri_diag_q(
    magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
    const double *dA, magma_int_t ldda,
    double *d_dinvA,
    magma_queue_t queue)
{
    // Argument validation: encode the index of the first bad argument in
    // `info` (negated) and report it through magma_xerbla.
    magma_int_t info = 0;
    if (uplo != MagmaLower && uplo != MagmaUpper)
        info = -1;
    else if (diag != MagmaNonUnit && diag != MagmaUnit)
        info = -2;
    else if (n < 0)
        info = -3;
    else if (ldda < n)
        info = -5;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info
    }
    int nblocks = (n + IB - 1)/IB;
    // Zero the whole inverse-diagonal workspace before building it up.
    // NOTE(review): the hipMemset return code is discarded -- consider
    // checking it for allocation/launch failures.
    hipMemset( d_dinvA, 0, ((n+NB-1)/NB)*NB*NB * sizeof(double) );
    if ( uplo == MagmaLower ) {
        // invert diagonal IB x IB inner blocks
        hipLaunchKernelGGL(( dtrtri_diag_kernel_lower), dim3(nblocks), dim3(IB), 0, queue , diag, n, dA, ldda, d_dinvA );
        // build up NB x NB blocks (assuming IB=16 here):
        // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
        // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
        // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
        // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
        for( int jb=IB; jb < NB; jb *= 2 ) {
            int kb = jb*2;
            int npages = (n + kb - 1)/kb;
            dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
            dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
            //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
            // Each doubling step is handled by a size-specialized kernel
            // pair (or triple beyond 64).
            switch (jb) {
                case 16:
                    hipLaunchKernelGGL(( triple_dgemm16_part1_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm16_part2_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 32:
                    hipLaunchKernelGGL(( triple_dgemm32_part1_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm32_part2_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 64:
                    hipLaunchKernelGGL(( triple_dgemm64_part1_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm64_part2_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
                default:
                    hipLaunchKernelGGL(( triple_dgemm_above64_part1_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm_above64_part2_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm_above64_part3_lower), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
            }
            // Stop doubling once the assembled block covers the whole matrix.
            if ( kb >= n ) break;
        }
    }
    else {
        hipLaunchKernelGGL(( dtrtri_diag_kernel_upper), dim3(nblocks), dim3(IB), 0, queue , diag, n, dA, ldda, d_dinvA );
        // update the inverse up to the size of IB
        for( int jb=IB; jb < NB; jb*=2 ) {
            int kb = jb*2;
            int npages = (n + kb - 1)/kb;
            dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
            dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
            switch (jb) {
                case 16:
                    hipLaunchKernelGGL(( triple_dgemm16_part1_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm16_part2_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 32:
                    hipLaunchKernelGGL(( triple_dgemm32_part1_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm32_part2_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 64:
                    hipLaunchKernelGGL(( triple_dgemm64_part1_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm64_part2_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
                default:
                    hipLaunchKernelGGL(( triple_dgemm_above64_part1_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm_above64_part2_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    hipLaunchKernelGGL(( triple_dgemm_above64_part3_upper), dim3(grid), dim3(threads), 0, queue , n, dA, ldda, d_dinvA, jb, npages );
                    break;
            }
            if ( kb >= n ) break;
        }
    }
}
/**
@see magmablas_dtrtri_diag_q
@ingroup magma_dblas3
********************************************************************/
// Convenience overload: runs magmablas_dtrtri_diag_q on the default
// MAGMA stream (magma_stream).
extern "C" void
magmablas_dtrtri_diag(
    magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
    const double *dA, magma_int_t ldda,
    double *d_dinvA)
{
    magmablas_dtrtri_diag_q( uplo, diag, n, dA, ldda, d_dinvA, magma_stream );
}
| 345d9a3b8dc6de1226052add2b55d66295222d44.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from ztrtri_diag.cu normal z -> d, Wed Sep 17 15:08:23 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "common_magma.h"
#include "dtrtri.h"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Same as dtrtri_diag, but adds queue argument.
@ingroup magma_dblas3
********************************************************************/
/**
Purpose
-------
dtrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA DOUBLE_PRECISION array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA DOUBLE_PRECISION array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dtrtri_diag_q(
    magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
    const double *dA, magma_int_t ldda,
    double *d_dinvA,
    magma_queue_t queue)
{
    // Argument validation: encode the index of the first bad argument in
    // `info` (negated) and report it through magma_xerbla.
    magma_int_t info = 0;
    if (uplo != MagmaLower && uplo != MagmaUpper)
        info = -1;
    else if (diag != MagmaNonUnit && diag != MagmaUnit)
        info = -2;
    else if (n < 0)
        info = -3;
    else if (ldda < n)
        info = -5;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;  //info
    }
    int nblocks = (n + IB - 1)/IB;
    // Zero the whole inverse-diagonal workspace before building it up.
    // NOTE(review): the cudaMemset return code is discarded -- consider
    // checking it for allocation/launch failures.
    cudaMemset( d_dinvA, 0, ((n+NB-1)/NB)*NB*NB * sizeof(double) );
    if ( uplo == MagmaLower ) {
        // invert diagonal IB x IB inner blocks
        dtrtri_diag_kernel_lower<<< nblocks, IB, 0, queue >>>( diag, n, dA, ldda, d_dinvA );
        // build up NB x NB blocks (assuming IB=16 here):
        // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
        // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
        // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
        // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
        for( int jb=IB; jb < NB; jb *= 2 ) {
            int kb = jb*2;
            int npages = (n + kb - 1)/kb;
            dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
            dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
            //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
            // Each doubling step is handled by a size-specialized kernel
            // pair (or triple beyond 64).
            switch (jb) {
                case 16:
                    triple_dgemm16_part1_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm16_part2_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 32:
                    triple_dgemm32_part1_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm32_part2_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 64:
                    triple_dgemm64_part1_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm64_part2_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
                default:
                    triple_dgemm_above64_part1_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm_above64_part2_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm_above64_part3_lower<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
            }
            // Stop doubling once the assembled block covers the whole matrix.
            if ( kb >= n ) break;
        }
    }
    else {
        dtrtri_diag_kernel_upper<<< nblocks, IB, 0, queue >>>( diag, n, dA, ldda, d_dinvA );
        // update the inverse up to the size of IB
        for( int jb=IB; jb < NB; jb*=2 ) {
            int kb = jb*2;
            int npages = (n + kb - 1)/kb;
            dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
            dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
            switch (jb) {
                case 16:
                    triple_dgemm16_part1_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm16_part2_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 32:
                    triple_dgemm32_part1_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm32_part2_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
                case 64:
                    triple_dgemm64_part1_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm64_part2_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
                default:
                    triple_dgemm_above64_part1_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm_above64_part2_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    triple_dgemm_above64_part3_upper<<< grid, threads, 0, queue >>>( n, dA, ldda, d_dinvA, jb, npages );
                    break;
            }
            if ( kb >= n ) break;
        }
    }
}
/**
@see magmablas_dtrtri_diag_q
@ingroup magma_dblas3
********************************************************************/
// Convenience overload: runs magmablas_dtrtri_diag_q on the default
// MAGMA stream (magma_stream).
extern "C" void
magmablas_dtrtri_diag(
    magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
    const double *dA, magma_int_t ldda,
    double *d_dinvA)
{
    magmablas_dtrtri_diag_q( uplo, diag, n, dA, ldda, d_dinvA, magma_stream );
}
|
b37611b5b9d95bf4cad72229e06a4e0480dd4ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "reduce_hip.cuh"
using namespace std;
// Benchmark driver: reduces an array of N ones with `reduce` and prints
// the result followed by the elapsed time in milliseconds.
// Usage: <exe> <N> <threads_per_block>
int main(int argc, char* argv[]) {
    // Guard against missing arguments (the original dereferenced argv[1]
    // and argv[2] unconditionally).
    if (argc < 3) {
        cerr << "usage: " << argv[0] << " <N> <threads_per_block>" << endl;
        return 1;
    }
    int N = atoi(argv[1]);
    int tpb = atoi(argv[2]);

    hipEvent_t start;
    hipEvent_t stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Host input: N ones, so the expected reduction result is N.
    int* arr = new int[N];
    for (int i = 0; i < N; i++) arr[i] = 1;

    hipEventRecord(start);
    int res = reduce(arr, N, tpb);
    hipEventRecord(stop);
    hipEventSynchronize(stop);

    float ms;
    hipEventElapsedTime(&ms, start, stop);

    // Result first, elapsed milliseconds second.
    cout << res << endl;
    cout << ms << endl;

    // Release the timing events (the original leaked them).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    delete[] arr;
    return 0;
}
| b37611b5b9d95bf4cad72229e06a4e0480dd4ac8.cu | #include <iostream>
#include "reduce.cuh"
using namespace std;
// Benchmark driver: reduces an array of N ones with `reduce` and prints
// the result followed by the elapsed time in milliseconds.
// Usage: <exe> <N> <threads_per_block>
int main(int argc, char* argv[]) {
    // Guard against missing arguments (the original dereferenced argv[1]
    // and argv[2] unconditionally).
    if (argc < 3) {
        cerr << "usage: " << argv[0] << " <N> <threads_per_block>" << endl;
        return 1;
    }
    int N = atoi(argv[1]);
    int tpb = atoi(argv[2]);

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Host input: N ones, so the expected reduction result is N.
    int* arr = new int[N];
    for (int i = 0; i < N; i++) arr[i] = 1;

    cudaEventRecord(start);
    int res = reduce(arr, N, tpb);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms;
    cudaEventElapsedTime(&ms, start, stop);

    // Result first, elapsed milliseconds second.
    cout << res << endl;
    cout << ms << endl;

    // Release the timing events (the original leaked them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    delete[] arr;
    return 0;
}
|
eac7c87c1d65bb0e5ed7094773e267885502fc96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/cudf.h>
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/cudf_utils.h>
#include <cudf/legacy/table.hpp>
#include <copying/gather.hpp>
#include <cudf/types.h>
#include <utilities/bit_util.cuh>
#include <utilities/cuda_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <utilities/column_utils.hpp>
#include <bitmask/legacy/bit_mask.cuh>
#include <reductions/reduction_functions.cuh>
#include <stream_compaction/copy_if.cuh>
using bit_mask::bit_mask_t;
namespace cudf {
namespace detail {
// Kernel: inverts a scatter map into a gather map. Thread `s` reads
// scatter_map[s] == d and writes gather_map[d] = s; destinations outside
// [0, destination_rows) are skipped, and entries never written keep the
// value the caller initialised gather_map with.
__global__ void invert_map(gdf_index_type gather_map[], const gdf_size_type destination_rows,
                           gdf_index_type const scatter_map[], const gdf_size_type source_rows){
  const gdf_index_type src = threadIdx.x + blockIdx.x * blockDim.x;
  if (src >= source_rows) {
    return;
  }
  const gdf_index_type dst = scatter_map[src];
  if (dst < destination_rows) {
    gather_map[dst] = src;
  }
}
// Scatters rows of *source_table into *destination_table in place:
// destination row scatter_map[i] receives source row i. Implemented by
// inverting the scatter map into a gather map and delegating to gather().
void scatter(table const* source_table, gdf_index_type const scatter_map[],
             table* destination_table) {
  // Validate pointers BEFORE dereferencing them (the original called
  // num_rows() on both tables ahead of these null checks).
  CUDF_EXPECTS(nullptr != source_table, "source table is null");
  CUDF_EXPECTS(nullptr != destination_table, "destination table is null");

  const gdf_size_type num_source_rows = source_table->num_rows();
  const gdf_size_type num_destination_rows = destination_table->num_rows();

  if (0 == num_source_rows) {
    return;
  }
  CUDF_EXPECTS(nullptr != scatter_map, "scatter_map is null");

  // Turn the scatter_map[] into a gather_map[] and then call gather(...).
  // Entries are initialized to -1; any destination row whose entry is
  // still -1 after invert_map was not targeted by the scatter map, and
  // gather (with bounds checking enabled) leaves it unchanged.
  constexpr gdf_index_type default_index_value = -1;
  rmm::device_vector<gdf_index_type> v_gather_map(num_destination_rows, default_index_value);

  constexpr int block_size = 256;
  const gdf_size_type invert_grid_size =
      (num_destination_rows + block_size - 1) / block_size;

  hipLaunchKernelGGL(( detail::invert_map), dim3(invert_grid_size), dim3(block_size), 0, 0, v_gather_map.data().get(), num_destination_rows, scatter_map, num_source_rows);

  // Bounds checking stays on since some destination rows may be untouched.
  detail::gather(source_table, v_gather_map.data().get(), destination_table, true, true);
}
// Kernel: sets (mark_true == true) or clears (mark_true == false) the
// validity bit of every destination row named by scatter_map, using a
// grid-stride loop over the scatter rows.
template<bool mark_true>
__global__ void marking_bitmask_kernel(
    bit_mask_t* destination_mask,
    gdf_size_type num_destination_rows,
    const gdf_index_type scatter_map[],
    gdf_size_type num_scatter_rows
){
  const gdf_index_type stride = blockDim.x * gridDim.x;
  for (gdf_index_type row = threadIdx.x + blockIdx.x * blockDim.x;
       row < num_scatter_rows;
       row += stride) {
    const gdf_index_type target_row = scatter_map[row];
    if (mark_true) {
      bit_mask::set_bit_safe(destination_mask, target_row);
    } else {
      bit_mask::clear_bit_safe(destination_mask, target_row);
    }
  }
}
struct scalar_scatterer {
  /**---------------------------------------------------------------------------*
   * @brief Type-dispatched function that scatters a single scalar value into
   * a destination column at the rows named by `scatter_map`.
   *
   * @tparam ColumnType Dispatched element type of the destination column
   * @param source The scalar whose value is written
   * @param scatter_map Device array of destination row indices
   * @param num_scatter_rows Number of entries in `scatter_map`
   * @param destination_column The column written to
   * @param stream Optional CUDA stream on which to execute kernels
   *---------------------------------------------------------------------------**/
  template <typename ColumnType>
  void operator()(gdf_scalar const& source,
                  gdf_index_type const scatter_map[], const gdf_size_type num_scatter_rows,
                  gdf_column* destination_column, hipStream_t stream = 0) {
    // Reinterpret the scalar's raw storage as the dispatched element type.
    const ColumnType source_data {
        *reinterpret_cast<ColumnType const*>(&source.data) };
    ColumnType* destination_data {
        reinterpret_cast<ColumnType*>(destination_column->data) };
    // Scatter the same value to every mapped row via a constant iterator.
    thrust::constant_iterator<ColumnType> const_iter(source_data);
    thrust::scatter(rmm::exec_policy(stream)->on(stream), const_iter,
                    const_iter + num_scatter_rows, scatter_map,
                    destination_data);
    CHECK_STREAM(stream);
  }
};
// Scatters one scalar per destination column to the rows in scatter_map.
// Data updates and validity-bitmask updates for each column are issued on
// separate streams (2 * n_cols scoped streams: data first, then bitmasks).
void scalar_scatter(const std::vector<gdf_scalar>& source,
                    gdf_index_type const scatter_map[],
                    gdf_size_type num_scatter_rows, table* destination_table){
  CUDF_EXPECTS(source.size() == (size_t)destination_table->num_columns(),
               "scalar vector and destination table size mismatch.");
  const int n_cols = source.size();
  std::vector<cudf::util::cuda::scoped_stream> v_streams(2*n_cols);
  // data part
  for(int i = 0; i < n_cols; i++){
    CUDF_EXPECTS(source[i].dtype == destination_table->get_column(i)->dtype,
                 "source/destination data type mismatch.");
    CUDF_EXPECTS(source[i].dtype != GDF_STRING_CATEGORY,
                 "Scalar scatter currently does not support GDF_STRING_CATEGORY.");
    type_dispatcher(source[i].dtype, scalar_scatterer{}, source[i],
                    scatter_map, num_scatter_rows, destination_table->get_column(i), v_streams[i]);
  }
  constexpr int block_size = 256;
  const int grid_size = cudf::util::cuda::grid_config_1d(num_scatter_rows, block_size).num_blocks;
  // bitmask part: set bits for valid scalars, clear bits for null scalars
  for(int i = 0; i < n_cols; i++){
    gdf_column* dest_col = destination_table->get_column(i);
    if(dest_col->valid){
      bit_mask_t* dest_valid = reinterpret_cast<bit_mask_t*>(dest_col->valid);
      auto bitmask_kernel = source[i].is_valid ?
          marking_bitmask_kernel<true> : marking_bitmask_kernel<false>;
      hipLaunchKernelGGL(( bitmask_kernel), dim3(grid_size), dim3(block_size), 0, v_streams[i+n_cols],
          dest_valid, dest_col->size, scatter_map, num_scatter_rows);
      // Refresh the cached null count after modifying the validity mask.
      set_null_count(*dest_col);
    }
  }
}
// Validates the scatter map against the input table, throwing via
// CUDF_EXPECTS on contract violations (wrong dtype, nulls, wrong length).
// Returns whether there is any work to do: false for empty maps/tables.
inline bool validate_scatter_map(gdf_column const& scatter_map,
                        cudf::table const& input) {
  CUDF_EXPECTS(scatter_map.dtype == GDF_INT32,
      "scatter_map is not GDF_INT32 column.");
  CUDF_EXPECTS(not cudf::has_nulls(scatter_map),
      "Scatter map cannot contain null elements.");
  CUDF_EXPECTS(scatter_map.size == input.num_rows(),
      "scatter_map length is not equal to number of rows in input table.");
  // No-op cases: nothing to scatter.
  return scatter_map.size != 0 &&
         input.num_columns() != 0 &&
         input.num_rows() != 0;
}
// Splits `input` into `num_groups` tables: table g receives, in order, the
// rows whose scatter_array value equals g. Performs one copy_if pass over
// the input per group.
std::vector<cudf::table>
ordered_scatter_to_tables(cudf::table const& input,
                          gdf_index_type const* scatter_array,
                          gdf_index_type num_groups) {
  std::vector<cudf::table> output_tables;
  output_tables.reserve(num_groups);
  for (gdf_index_type groupid = 0; groupid < num_groups; groupid++) {
    // Device lambda keeps only the rows belonging to this group.
    output_tables.push_back(
        detail::copy_if(input,
                        [scatter_array, groupid] __device__ (gdf_index_type row)
                        { return groupid==scatter_array[row];
                        }));
  }
  return output_tables;
}
} // namespace detail
// Scatters rows of `source` into a copy of `target` at the positions given
// by scatter_map and returns the resulting table (the target itself is not
// modified).
table scatter(table const& source, gdf_index_type const scatter_map[],
              table const& target) {
  const gdf_size_type n_cols = target.num_columns();
  table output = copy(target);
  for(int i = 0; i < n_cols; ++i){
    // If the source column has nulls but the target column carries no
    // validity mask, allocate one on the output and mark every row valid
    // (0xff) before the scatter overwrites the mapped rows.
    if(cudf::has_nulls(*source.get_column(i)) && !is_nullable(*target.get_column(i))){
      gdf_size_type valid_size = gdf_valid_allocation_size(target.get_column(i)->size);
      RMM_TRY(RMM_ALLOC(&output.get_column(i)->valid, valid_size, 0));
      gdf_size_type valid_size_set = gdf_num_bitmask_elements(target.get_column(i)->size);
      CUDA_TRY(hipMemset(output.get_column(i)->valid, 0xff, valid_size_set));
    }
  }
  detail::scatter(&source, scatter_map, &output);
  // Re-gathers category data on the output -- presumably needed for
  // GDF_STRING_CATEGORY columns; confirm against nvcategory_gather_table.
  nvcategory_gather_table(output, output);
  return output;
}
// Scatters one scalar per column into a copy of `target` at the
// num_scatter_rows positions given by scatter_map, returning the result.
table scatter(std::vector<gdf_scalar> const& source,
              gdf_index_type const scatter_map[],
              gdf_size_type num_scatter_rows, table const& target){
  const gdf_size_type n_cols = target.num_columns();
  table output = copy(target);
  for(int i = 0; i < n_cols; ++i){
    // If the scalar for this column is null but the target column has no
    // validity mask, allocate one on the output and mark every row valid
    // (0xff) so scalar_scatter can clear the scattered rows.
    if(source[i].is_valid == false && !is_nullable(*target.get_column(i))){
      gdf_size_type valid_size = gdf_valid_allocation_size(target.get_column(i)->size);
      RMM_TRY(RMM_ALLOC(&output.get_column(i)->valid, valid_size, 0));
      gdf_size_type valid_size_set = gdf_num_bitmask_elements(target.get_column(i)->size);
      CUDA_TRY(hipMemset(output.get_column(i)->valid, 0xff, valid_size_set));
    }
  }
  detail::scalar_scatter(source, scatter_map, num_scatter_rows, &output);
  return output;
}
// Partitions `input` into tables keyed by the values of `scatter_map`
// (GDF_INT32, no nulls, one entry per input row): row r goes to output
// table scatter_map[r]. The number of output tables is max(scatter_map)+1.
// Returns an empty vector when there is nothing to scatter.
std::vector<cudf::table>
scatter_to_tables(cudf::table const& input, gdf_column const& scatter_map) {
  if(not detail::validate_scatter_map(scatter_map, input))
    return std::vector<cudf::table>();
  gdf_index_type* scatter_array =
      static_cast<gdf_index_type*>(scatter_map.data);
  // Group count is derived from the largest map value (device reduction).
  gdf_scalar max_elem = cudf::reduction::max(scatter_map, scatter_map.dtype);
  gdf_index_type num_groups = max_elem.data.si32 + 1;
  return detail::ordered_scatter_to_tables(input,
                                           scatter_array,
                                           num_groups);
}
} // namespace cudf
| eac7c87c1d65bb0e5ed7094773e267885502fc96.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/cudf.h>
#include <rmm/thrust_rmm_allocator.h>
#include <utilities/cudf_utils.h>
#include <cudf/legacy/table.hpp>
#include <copying/gather.hpp>
#include <cudf/types.h>
#include <utilities/bit_util.cuh>
#include <utilities/cuda_utils.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <utilities/column_utils.hpp>
#include <bitmask/legacy/bit_mask.cuh>
#include <reductions/reduction_functions.cuh>
#include <stream_compaction/copy_if.cuh>
using bit_mask::bit_mask_t;
namespace cudf {
namespace detail {
// Kernel: inverts a scatter map into a gather map.
// One thread per source row; thread s writes s into gather_map[scatter_map[s]].
// Destination slots never named by the scatter map keep whatever value the
// caller initialized them with. Out-of-range destination indices are
// silently dropped by the bounds check rather than reported.
__global__ void invert_map(gdf_index_type gather_map[], const gdf_size_type destination_rows,
gdf_index_type const scatter_map[], const gdf_size_type source_rows){
gdf_index_type source_row = threadIdx.x + blockIdx.x * blockDim.x;
if(source_row < source_rows){ // guard the grid tail
gdf_index_type destination_row = scatter_map[source_row];
if(destination_row < destination_rows){ // drop out-of-bounds targets
gather_map[destination_row] = source_row;
}
}
}
/**
 * @brief Scatters rows of `source_table` into `destination_table` at the
 * positions given by `scatter_map` (one destination index per source row).
 *
 * Implemented by inverting the scatter map into a gather map -- unmapped
 * destination rows keep a sentinel of -1 so the gather leaves them
 * untouched -- then delegating to detail::gather with bounds checking on.
 */
void scatter(table const* source_table, gdf_index_type const scatter_map[],
table* destination_table) {
// Validate the table pointers BEFORE dereferencing them: the previous
// version called num_rows() on both pointers first, which is undefined
// behavior when either is null.
CUDF_EXPECTS(nullptr != source_table, "source table is null");
CUDF_EXPECTS(nullptr != destination_table, "destination table is null");
const gdf_size_type num_source_rows = source_table->num_rows();
const gdf_size_type num_destination_rows = destination_table->num_rows();
if (0 == num_source_rows) {
return; // nothing to scatter
}
CUDF_EXPECTS(nullptr != scatter_map, "scatter_map is null");
// Turn the scatter_map[] into a gather_map[] and then call gather(...).
// We are initializing the result gather_map with -1 so if at the end the
// value is not modified we know the original scatter map does not map to
// this row, and we should keep whatever is there originally.
constexpr gdf_index_type default_index_value = -1;
rmm::device_vector<gdf_index_type> v_gather_map(num_destination_rows, default_index_value);
constexpr int block_size = 256;
const gdf_size_type invert_grid_size =
(num_destination_rows + block_size - 1) / block_size; // ceil-div
detail::invert_map<<<invert_grid_size, block_size>>>(v_gather_map.data().get(), num_destination_rows, scatter_map, num_source_rows);
// We want to check bounds for scatter since it is possible that
// some elements of the destination column are not modified.
detail::gather(source_table, v_gather_map.data().get(), destination_table, true, true);
}
// Kernel: for every row named in scatter_map, set (mark_true) or clear the
// corresponding bit of destination_mask. Grid-stride loop, so any launch
// configuration covers all scatter rows. The *_safe bit helpers are
// presumably atomic (several scatter entries can land in the same mask
// word) -- confirm in bit_mask.cuh. num_destination_rows is currently
// unused by the body.
template<bool mark_true>
__global__ void marking_bitmask_kernel(
bit_mask_t* destination_mask,
gdf_size_type num_destination_rows,
const gdf_index_type scatter_map[],
gdf_size_type num_scatter_rows
){
gdf_index_type row = threadIdx.x + blockIdx.x * blockDim.x;
while (row < num_scatter_rows) {
const gdf_index_type output_row = scatter_map[row];
// mark_true is a template parameter, so this branch is compile-time.
if(mark_true){
bit_mask::set_bit_safe(destination_mask, output_row);
}else{
bit_mask::clear_bit_safe(destination_mask, output_row);
}
row += blockDim.x * gridDim.x; // grid-stride advance
}
}
struct scalar_scatterer {
/**---------------------------------------------------------------------------*
 * @brief Type-dispatched functor that scatters a single scalar value into a
 * destination column at every index named by `scatter_map`.
 *
 * @tparam ColumnType Dispatched type of the column being written
 * @param source The scalar whose value is replicated into the destination
 * @param scatter_map Array of destination row indices, one per scattered row
 * @param num_scatter_rows Number of entries in `scatter_map`
 * @param destination_column The column written to
 * @param stream Optional CUDA stream on which to execute kernels
 *---------------------------------------------------------------------------**/
template <typename ColumnType>
void operator()(gdf_scalar const& source,
gdf_index_type const scatter_map[], const gdf_size_type num_scatter_rows,
gdf_column* destination_column, cudaStream_t stream = 0) {
// Reinterpret the scalar's raw storage as the dispatched element type.
const ColumnType source_data {
*reinterpret_cast<ColumnType const*>(&source.data) };
ColumnType* destination_data {
reinterpret_cast<ColumnType*>(destination_column->data) };
// A constant_iterator lets thrust::scatter broadcast one value to all rows.
thrust::constant_iterator<ColumnType> const_iter(source_data);
thrust::scatter(rmm::exec_policy(stream)->on(stream), const_iter,
const_iter + num_scatter_rows, scatter_map,
destination_data);
CHECK_STREAM(stream);
}
};
// Scatters one scalar per destination column: column i receives source[i]
// at every row named in scatter_map. Work is spread over 2*n_cols scoped
// streams: the first n_cols carry the data scatters, the second n_cols the
// validity-bitmask updates, so columns proceed concurrently.
void scalar_scatter(const std::vector<gdf_scalar>& source,
gdf_index_type const scatter_map[],
gdf_size_type num_scatter_rows, table* destination_table){
CUDF_EXPECTS(source.size() == (size_t)destination_table->num_columns(),
"scalar vector and destination table size mismatch.");
const int n_cols = source.size();
std::vector<cudf::util::cuda::scoped_stream> v_streams(2*n_cols);
// data part: dispatch scalar_scatterer per column on its own stream
for(int i = 0; i < n_cols; i++){
CUDF_EXPECTS(source[i].dtype == destination_table->get_column(i)->dtype,
"source/destination data type mismatch.");
CUDF_EXPECTS(source[i].dtype != GDF_STRING_CATEGORY,
"Scalar scatter currently does not support GDF_STRING_CATEGORY.");
type_dispatcher(source[i].dtype, scalar_scatterer{}, source[i],
scatter_map, num_scatter_rows, destination_table->get_column(i), v_streams[i]);
}
constexpr int block_size = 256;
const int grid_size = cudf::util::cuda::grid_config_1d(num_scatter_rows, block_size).num_blocks;
// bitmask part: set bits for valid scalars, clear bits for null scalars
for(int i = 0; i < n_cols; i++){
gdf_column* dest_col = destination_table->get_column(i);
if(dest_col->valid){
bit_mask_t* dest_valid = reinterpret_cast<bit_mask_t*>(dest_col->valid);
auto bitmask_kernel = source[i].is_valid ?
marking_bitmask_kernel<true> : marking_bitmask_kernel<false>;
bitmask_kernel<<<grid_size, block_size, 0, v_streams[i+n_cols]>>>
(dest_valid, dest_col->size, scatter_map, num_scatter_rows);
// NOTE(review): set_null_count runs right after the async launch --
// confirm it synchronizes internally before reading the mask.
set_null_count(*dest_col);
}
}
}
// Validates `scatter_map` against `input`: it must be a GDF_INT32 column
// with no nulls and exactly one entry per input row (CUDF_EXPECTS throws
// otherwise). Returns false only for the degenerate empty cases where
// there is nothing to scatter.
inline bool validate_scatter_map(gdf_column const& scatter_map,
cudf::table const& input) {
  CUDF_EXPECTS(scatter_map.dtype == GDF_INT32,
               "scatter_map is not GDF_INT32 column.");
  CUDF_EXPECTS(not cudf::has_nulls(scatter_map),
               "Scatter map cannot contain null elements.");
  CUDF_EXPECTS(scatter_map.size == input.num_rows(),
               "scatter_map length is not equal to number of rows in input table.");
  const bool nothing_to_do = (scatter_map.size == 0) ||
                             (input.num_columns() == 0) ||
                             (input.num_rows() == 0);
  return !nothing_to_do;
}
// Partitions `input` into `num_groups` tables: output table g holds the
// rows whose scatter_array entry equals g, preserving original row order.
// Performs one full copy_if pass over the input per group, so the total
// cost is O(num_groups * num_rows).
std::vector<cudf::table>
ordered_scatter_to_tables(cudf::table const& input,
gdf_index_type const* scatter_array,
gdf_index_type num_groups) {
std::vector<cudf::table> output_tables;
output_tables.reserve(num_groups);
for (gdf_index_type groupid = 0; groupid < num_groups; groupid++) {
// Device lambda selects exactly the rows belonging to this group.
output_tables.push_back(
detail::copy_if(input,
[scatter_array, groupid] __device__ (gdf_index_type row)
{ return groupid==scatter_array[row];
}));
}
return output_tables;
}
} // namespace detail
// Public row scatter: copies `target`, then scatters rows of `source` into
// the copy at the positions in `scatter_map`. A non-nullable target column
// that receives data from a nullable source column first gets an all-valid
// bitmask so the scatter can record incoming nulls.
table scatter(table const& source, gdf_index_type const scatter_map[],
table const& target) {
const gdf_size_type n_cols = target.num_columns();
table output = copy(target);
for(int i = 0; i < n_cols; ++i){
// Allocate bitmask for each column
if(cudf::has_nulls(*source.get_column(i)) && !is_nullable(*target.get_column(i))){
gdf_size_type valid_size = gdf_valid_allocation_size(target.get_column(i)->size);
RMM_TRY(RMM_ALLOC(&output.get_column(i)->valid, valid_size, 0));
gdf_size_type valid_size_set = gdf_num_bitmask_elements(target.get_column(i)->size);
// 0xff: every row valid until the scatter clears specific bits.
CUDA_TRY(cudaMemset(output.get_column(i)->valid, 0xff, valid_size_set));
}
}
detail::scatter(&source, scatter_map, &output);
// Rebuild NVCategory dictionaries for any string-category columns.
nvcategory_gather_table(output, output);
return output;
}
// Scatters one scalar per column into a copy of `target` at the rows named
// by `scatter_map`. A column receiving a null scalar gets a freshly
// allocated, all-valid bitmask first (when the target column had none) so
// the subsequent scatter can clear individual validity bits.
table scatter(std::vector<gdf_scalar> const& source,
              gdf_index_type const scatter_map[],
              gdf_size_type num_scatter_rows, table const& target){
  table output = copy(target);
  const gdf_size_type column_count = target.num_columns();
  for (gdf_size_type col = 0; col < column_count; ++col) {
    auto* target_col = target.get_column(col);
    const bool needs_mask =
        (source[col].is_valid == false) && !is_nullable(*target_col);
    if (!needs_mask) {
      continue;
    }
    // Allocate a bitmask sized for the column and mark every row valid.
    gdf_size_type alloc_bytes = gdf_valid_allocation_size(target_col->size);
    RMM_TRY(RMM_ALLOC(&output.get_column(col)->valid, alloc_bytes, 0));
    gdf_size_type set_bytes = gdf_num_bitmask_elements(target_col->size);
    CUDA_TRY(cudaMemset(output.get_column(col)->valid, 0xff, set_bytes));
  }
  detail::scalar_scatter(source, scatter_map, num_scatter_rows, &output);
  return output;
}
// Splits `input` row-wise into one table per group id found in
// `scatter_map`. Returns an empty vector when the map fails validation.
// The group count is derived from the maximum map value, so group ids are
// assumed to be 0-based and dense.
std::vector<cudf::table>
scatter_to_tables(cudf::table const& input, gdf_column const& scatter_map) {
  if (!detail::validate_scatter_map(scatter_map, input)) {
    return {};
  }
  auto* scatter_array = static_cast<gdf_index_type*>(scatter_map.data);
  // Highest group id present determines how many output tables to build.
  gdf_scalar max_elem = cudf::reduction::max(scatter_map, scatter_map.dtype);
  const gdf_index_type num_groups = max_elem.data.si32 + 1;
  return detail::ordered_scatter_to_tables(input, scatter_array, num_groups);
}
} // namespace cudf
|
f73ff02d1f8df241f7014ace76d2109ba4968b89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "base/Range.hh"
// Kernel: each thread owns a contiguous run of `max_steps` elements of `x`
// and adds `distance` to every element in that run.
// NOTE(review): there is no bounds guard, so the launch configuration must
// supply exactly len(x) / max_steps threads -- confirm at call sites.
__global__ void move(const int max_steps, double distance, double* x) {
auto start = (blockIdx.x * blockDim.x + threadIdx.x) * max_steps;
for (int i = 0; i < max_steps; ++i) {
// Offset into this thread's private slice; slices never overlap.
x[start + i] += distance;
}
}
| f73ff02d1f8df241f7014ace76d2109ba4968b89.cu | #include "base/Range.hh"
// Kernel: each thread advances its own contiguous slice of `max_steps`
// entries of `x` by `distance`. The launch must supply one thread per
// slice; there is no bounds guard.
__global__ void move(const int max_steps, double distance, double* x) {
  const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
  const auto base = tid * max_steps;
  for (int step = 0; step < max_steps; ++step) {
    x[base + step] += distance;
  }
}
|
RangeToWorld.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* RangeToWorld.cu
*
* Created on: 2012
* Author: sk
*/
#include "Common.cuh"
// Kernel: converts one depth pixel per thread into a world-space 3D point.
// Thread (x, y) reads data.depth[y*width + x] and writes x/y/z floats at
// data.points[3*(y*width + x)]. Points outside the camera's axis-aligned
// crop box (camera_opt.min/max) are zeroed. Depth is negated so the result
// matches OpenGL's -Z-forward convention.
__global__ void rangeToWorldKernel(const CameraOptions camera_opt,
const FrameDataCUDA data) {
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: the launch rounds the grid up with divUp, so edge
// blocks carry threads past the image bounds; previously those threads
// indexed depth/points out of bounds.
if (x >= data.width || y >= data.height) {
return;
}
uint i = y * data.width + x;
uint p_i = i * 3;
// change depth direction, so it would correspond to openGL coords
float3 d = make_float3(x, y, -data.depth[i]);
float3 p = getWorldCoordinate(camera_opt.t,
camera_opt.ref_pix_size, camera_opt.ref_distance, d);
// 3d space filtering
if (p.x > camera_opt.min[0] && p.x < camera_opt.max[0] &&
p.y > camera_opt.min[1] && p.y < camera_opt.max[1] &&
p.z > camera_opt.min[2] && p.z < camera_opt.max[2]) {
data.points[p_i] = p.x;
data.points[p_i + 1] = p.y;
data.points[p_i + 2] = p.z;
} else {
data.points[p_i] = 0.0f;
data.points[p_i + 1] = 0.0f;
data.points[p_i + 2] = 0.0f;
}
}
// Kernel: estimates a per-pixel surface normal as the normalized cross
// product of the vectors to the right and below neighbors. Pixels whose
// own point or either neighbor's point has z == 0 get a zero normal.
// NOTE(review): at x == width-1 the "right neighbor" wraps into the next
// row, and at y == height-1 it reads past the row range via getPoint --
// confirm getPoint clamps or that border pixels are ignored downstream.
__global__ void getNormalsKernel(const CameraOptions camera_opt,
const FrameDataCUDA data) {
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the rounded-up grid so edge blocks do not index out of bounds.
if (x >= data.width || y >= data.height) {
return;
}
uint i = y * data.width + x;
uint p_i = i * 3;
// estimate a normal
float3 p1 = getPoint(data.points, x + 1, y, data.width);
float3 p2 = getPoint(data.points, x, y + 1, data.width);
float3 p3 = getPoint(data.points, x, y, data.width);
float3 normal;
// Use float literals consistently (the original mixed 0.0 and 0; the
// comparison result is identical for exact zero).
if (p1.z == 0.0f || p2.z == 0.0f || p3.z == 0.0f) {
normal.x = 0.0f;
normal.y = 0.0f;
normal.z = 0.0f;
} else {
float3 v1 = p1 - p3;
float3 v2 = p2 - p3;
normal = cross(v1, v2);
normal = normalize(normal);
}
data.normals[p_i] = normal.x;
data.normals[p_i + 1] = normal.y;
data.normals[p_i + 2] = normal.z;
}
// Host wrapper: launches the point-cloud kernel and then the
// normal-estimation kernel over the full frame with 16x16 thread blocks.
// divUp rounds the grid up so the whole image is covered. Synchronizes
// after each launch so errors surface here instead of at a later call.
void rangeToWorldCUDA(CameraOptions camera_opt, FrameDataCUDA data) {
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(divUp(data.width, threadsPerBlock.x),
divUp(data.height, threadsPerBlock.y));
hipLaunchKernelGGL(( rangeToWorldKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, camera_opt, data);
cutilSafeCall(cutilDeviceSynchronize());
hipLaunchKernelGGL(( getNormalsKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, camera_opt, data);
cutilSafeCall(cutilDeviceSynchronize());
}
| RangeToWorld.cu | /*
* RangeToWorld.cu
*
* Created on: 2012
* Author: sk
*/
#include "Common.cuh"
// Kernel: converts one depth pixel per thread into a world-space 3D point.
// Thread (x, y) reads data.depth[y*width + x] and writes x/y/z floats at
// data.points[3*(y*width + x)]. Points outside the camera's axis-aligned
// crop box (camera_opt.min/max) are zeroed. Depth is negated so the result
// matches OpenGL's -Z-forward convention.
__global__ void rangeToWorldKernel(const CameraOptions camera_opt,
const FrameDataCUDA data) {
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: the launch rounds the grid up with divUp, so edge
// blocks carry threads past the image bounds; previously those threads
// indexed depth/points out of bounds.
if (x >= data.width || y >= data.height) {
return;
}
uint i = y * data.width + x;
uint p_i = i * 3;
// change depth direction, so it would correspond to openGL coords
float3 d = make_float3(x, y, -data.depth[i]);
float3 p = getWorldCoordinate(camera_opt.t,
camera_opt.ref_pix_size, camera_opt.ref_distance, d);
// 3d space filtering
if (p.x > camera_opt.min[0] && p.x < camera_opt.max[0] &&
p.y > camera_opt.min[1] && p.y < camera_opt.max[1] &&
p.z > camera_opt.min[2] && p.z < camera_opt.max[2]) {
data.points[p_i] = p.x;
data.points[p_i + 1] = p.y;
data.points[p_i + 2] = p.z;
} else {
data.points[p_i] = 0.0f;
data.points[p_i + 1] = 0.0f;
data.points[p_i + 2] = 0.0f;
}
}
// Kernel: estimates a per-pixel surface normal as the normalized cross
// product of the vectors to the right and below neighbors. Pixels whose
// own point or either neighbor's point has z == 0 get a zero normal.
// NOTE(review): at x == width-1 the "right neighbor" wraps into the next
// row, and at y == height-1 it reads past the row range via getPoint --
// confirm getPoint clamps or that border pixels are ignored downstream.
__global__ void getNormalsKernel(const CameraOptions camera_opt,
const FrameDataCUDA data) {
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the rounded-up grid so edge blocks do not index out of bounds.
if (x >= data.width || y >= data.height) {
return;
}
uint i = y * data.width + x;
uint p_i = i * 3;
// estimate a normal
float3 p1 = getPoint(data.points, x + 1, y, data.width);
float3 p2 = getPoint(data.points, x, y + 1, data.width);
float3 p3 = getPoint(data.points, x, y, data.width);
float3 normal;
// Use float literals consistently (the original mixed 0.0 and 0; the
// comparison result is identical for exact zero).
if (p1.z == 0.0f || p2.z == 0.0f || p3.z == 0.0f) {
normal.x = 0.0f;
normal.y = 0.0f;
normal.z = 0.0f;
} else {
float3 v1 = p1 - p3;
float3 v2 = p2 - p3;
normal = cross(v1, v2);
normal = normalize(normal);
}
data.normals[p_i] = normal.x;
data.normals[p_i + 1] = normal.y;
data.normals[p_i + 2] = normal.z;
}
// Host wrapper: launches the point-cloud kernel and then the
// normal-estimation kernel over the full frame with 16x16 thread blocks.
// divUp rounds the grid up so the whole image is covered. Synchronizes
// after each launch so errors surface here instead of at a later call.
void rangeToWorldCUDA(CameraOptions camera_opt, FrameDataCUDA data) {
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(divUp(data.width, threadsPerBlock.x),
divUp(data.height, threadsPerBlock.y));
rangeToWorldKernel<<<numBlocks, threadsPerBlock>>>(camera_opt, data);
cutilSafeCall(cutilDeviceSynchronize());
getNormalsKernel<<<numBlocks, threadsPerBlock>>>(camera_opt, data);
cutilSafeCall(cutilDeviceSynchronize());
}
|
7389569f3d9e68e0e7f1a70f76850c1fc11eba33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* File: maingpu.cu
* Author: jjbillings
*
* Created on October 16, 2016, 9:09 PM
*/
#include<cstdlib>
#include<stdio.h>
#include<queue>
#include<stack>
#include<iostream>
#include<fstream>
#include<ctime>
#include"nets.h"
using namespace std;
#define NUM_CONNECTIONS 1000
#define CONNECTIONS 1000
#define MAX_CHANNELS 500
#define SAMPLES 1
struct SimplePath;
struct Path;
struct Edge;
struct Connection;
struct Channel;
// One wavelength/channel slot on a directed edge. A channel either carries
// a primary path exclusively, or is shared by the backup paths of several
// connections whose primaries are mutually edge-disjoint.
struct Channel{
bool primary; //is this channel used for a primary path?
int numBackups; //total protected;
// Host-side pointers to the connections whose backups share this channel.
// Realistically, there will be far fewer than NUM_CONNECTIONS.
Connection *backupsOnChannel[NUM_CONNECTIONS];
// Device-resident Connection pointers for the same backups; read by the
// cost kernels (host pointers would be invalid on the GPU).
Connection *d_backupsOnChannel[NUM_CONNECTIONS];
};
// A directed edge of the network graph.
struct Edge {
int edgeNum; // index of this edge in the edge list
int v1; // source vertex
int v2; // destination vertex
int load; //load <= MAX_CHANNELS. Also, load is the sum of the primary AND backups paths using it.
int totalProtected; // number of connections whose backup uses this edge
};
// A loop-free candidate path produced by path enumeration. `index` is the
// last used slot in edgeNums/edges (the path has index+1 edges); -1 fields
// mark an empty/invalid slot. Device kernels read only edgeNums -- the
// `edges` pointers target host memory and are not dereferenced on the GPU.
struct SimplePath {
int sourceNode;
int destNode;
int hops; // edge count; -1 when this slot holds no path
int index; // last valid position in edgeNums/edges
int edgeNums[N_NODES]; // edge ids, valid on host and device
Edge *edges[N_NODES]; // host-side edge pointers
// Default-construct to the "no path" sentinel state.
SimplePath() {
for(int i = 0; i < N_NODES; ++i) {
edgeNums[i] = -1;
edges[i] = 0;
}
sourceNode = -1;
destNode = -1;
hops = -1;
index = -1;
};
};
// A routed path (primary or backup) with per-edge channel assignments.
// As in SimplePath, `index` is the last used slot in the per-edge arrays.
struct Path {
int sourceNode;
int destNode;
int hops; // number of edges in the path
int index; // last valid position in the per-edge arrays
int cost;
//Every path that uses a particular edge just has a reference to it (not a copy), so they can each manipulate it.
Edge *edges[N_NODES];
bool freeEdges[N_NODES]; //whether or not that edge has a cost of 0
int channelNum[N_NODES]; //Channel number for each edge that it uses
int edgeNums[N_NODES]; // edge ids; mirrors edges[] for device-side use
bool primary; // true when this Path serves as a primary
bool active;
};
// A provisioned demand between two nodes: a primary path plus an
// edge-disjoint backup path and their combined cost.
struct Connection {
int sourceNode;
int destNode;
int combinedCost; // primary hop count + backup channel cost
bool validBackup;
bool validPrimary;
Path backupPath;
Path primaryPath;
};
void readGraphReorderEdgeList(int vertexList[],Edge compactEdgeList[2*N_EDGES],Edge reorderedEdgeList[2*N_NODES]);
int computeAllSimplePathsN(SimplePath **ps, int *vertexList, Edge *edgeList, int sourceNode, int destNode, int hops);
void simulate(int *vertexList, Edge *edgeList);
void simulate_GPU(int *vertexList, Edge *edgeList);
void computeCostForBackupsWithGPU(SimplePath *p, int *potPathCosts, int primaryInd, Channel cs[2*N_EDGES][MAX_CHANNELS]);
int determineCompatibleBackups(SimplePath *p, int *potPathInd, int numPossiblePaths, int pInd);
void computeCostForBackups(SimplePath *p, int *potPathInd, int numPotPaths, int backupIndex, int *pathCosts,Channel cs[2*N_EDGES][MAX_CHANNELS]);
void selectChannels(Connection *c, Channel chan[2*N_EDGES][MAX_CHANNELS]);
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS], Connection *d_con);
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS]);
void prefilterCompatibleBackups(SimplePath *p, int *filteredPaths, int *numCompatPaths, int numPossiblePaths, int src, int dest);
int vertexList[N_NODES+1];
Edge edgeList[2*N_EDGES];
Edge reorderedEdgeList[2*N_EDGES];
Connection cons[NUM_CONNECTIONS];
Channel channels[2*N_EDGES][MAX_CHANNELS];
//-----------Kernel for Determining which Backups are compatible with which Primaries. WORKING---------//
//-----------Kernel for Determining which Backups are compatible with which Primaries. WORKING---------//
// For source/dest pair `conInd`, tests every (primary, backup) candidate
// combination for edge-disjointness: blockIdx.x selects the primary,
// threadIdx.x the backup. Writes 1 to
// potPathCosts[primary*NUM_CONNECTIONS + backup] when both paths exist and
// share no edge, -1 otherwise.
__global__ void determineCompatibleBackups(SimplePath *ps, int *potPathCosts,int conInd){
int p_ind = (conInd * NUM_CONNECTIONS) + blockIdx.x; // primary candidate
int b_ind = (conInd * NUM_CONNECTIONS) + threadIdx.x; // backup candidate
int output_ind = (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x;
int primIndex = ps[p_ind].index;
int backIndex = ps[b_ind].index;
int primHops = ps[p_ind].hops;
int backHops = ps[b_ind].hops;
if(primHops > 0 && backHops > 0) {
bool disjoint = true;
// Pairwise edge comparison: any shared edge disqualifies the pair.
for(int e1 = 0; disjoint && e1 <= primIndex; ++e1) {
for(int e2 = 0; disjoint && e2 <= backIndex; ++e2){
if(ps[p_ind].edgeNums[e1] == ps[b_ind].edgeNums[e2]) {
disjoint = false;
}
}
}
if(disjoint) {
potPathCosts[output_ind] = 1;
}else {
potPathCosts[output_ind] = -1;
}
}else {
// One of the two slots holds no real path.
potPathCosts[output_ind] = -1;
}
}
//---------Kernel for computing the cost of each primary/backup combo. WORKING -------//
//---------Kernel for computing the cost of each primary/backup combo. WORKING -------//
// Prices the pairing of primary path blockIdx.x with backup path
// threadIdx.x for source/dest pair `conInd`. For each edge of the backup
// path it searches for a "free" channel: one carrying no primary whose
// already-protected connections all have primaries edge-disjoint from
// ours (such a channel is shareable at zero cost). If no channel is free
// but an empty one exists, the edge costs 1; if the edge is full, the
// combination is priced out at 1,000,000. The result overwrites the
// compatibility flag; -1 entries (incompatible pairs) are left untouched.
__global__ void costsKernel(SimplePath *p, int *potPathCosts, int conInd , Channel *cs) {
int p_ind = (conInd * NUM_CONNECTIONS) + blockIdx.x;
int b_ind = (conInd * NUM_CONNECTIONS) + threadIdx.x;
int index = (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x;
//If we already know that this combo is unusable, just quit.
if(potPathCosts[index] == -1) {
return;
}
int cost = 0;
for(int e = 0; e <= p[b_ind].index; ++e) {
bool free = false;
int edgeNum = p[b_ind].edgeNums[e];
int firstOpenChannel = MAX_CHANNELS+1; // sentinel: no empty channel seen
for(int c = 0; !free && c < MAX_CHANNELS; ++c) {
int channelIndex = (edgeNum * MAX_CHANNELS)+c;
if(cs[channelIndex].primary == true) {
continue;
}
//At this point, we know that there are no primary paths on this channel
//Thus we must check and see if it is "free".
//we COULD use this channel, but there may be a "free" one further down.
if(cs[channelIndex].numBackups == 0) {
if(c < firstOpenChannel) {
firstOpenChannel = c;
}
continue;
}
bool disjoint = true;
//Check every connection currently on protected on the channel
for(int bup = 0; disjoint && bup < cs[channelIndex].numBackups; ++bup) {
//At this point, we know that there is at least one path protected on this channel.
//Technically, we should also know that it's not a primary path.
//for each edge of the protected connection's primary path
for(int e2 = 0; disjoint && e2 <= (*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.index; ++e2) {
//see if its the same edge as used by our primary path.
for(int e3 = 0; disjoint && e3 <= p[p_ind].index; ++e3 ) {
if((*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.edgeNums[e2] == p[p_ind].edgeNums[e3]) {
//There is a non-disjoint primary path on this channel, so it is unusable.
disjoint = false;
}
}
}
}
if(disjoint) {
//This channel is free
free = true;
}
}
if(!free) {
if(firstOpenChannel < MAX_CHANNELS) {
cost++; // must open a new channel for this edge
}else {
cost = 1000000; // edge exhausted: price the combo out
break;
}
}
}
potPathCosts[index] = cost;
}
//---------Kernel for computing the cost of each primary/backup combo using the list of filtered paths. WORKING-------//
//---------Kernel for computing the cost of each primary/backup combo using the list of filtered paths. WORKING-------//
// Same pricing scheme as costsKernel, but threads enumerate only the
// pre-filtered (edge-disjoint) backups from `filteredPaths`; threads past
// numCompatPaths[primary] exit immediately.
// NOTE(review): unlike costsKernel, this version `break`s out of the
// channel scan at the first empty channel instead of continuing to look
// for a shareable one -- confirm the early exit is intentional, since it
// can report cost 1 where a free (cost 0) channel exists further down.
__global__ void filteredCostsKernel(SimplePath *p, int *potPathCosts, int conInd , Channel *cs, int *numCompatPaths, int *filteredPaths) {
int p_ind = (conInd * NUM_CONNECTIONS) + blockIdx.x;
int index = (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x;
if(threadIdx.x >= numCompatPaths[p_ind]) {
return;
}
int b_ind = filteredPaths[(conInd * NUM_CONNECTIONS * NUM_CONNECTIONS) + (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x];
int cost = 0;
for(int e = 0; e <= p[b_ind].index; ++e) {
bool free = false;
int edgeNum = p[b_ind].edgeNums[e];
int firstOpenChannel = MAX_CHANNELS+1; // sentinel: no empty channel seen
for(int c = 0; !free && c < MAX_CHANNELS; ++c) {
int channelIndex = (edgeNum * MAX_CHANNELS)+c;
if(cs[channelIndex].primary == true) {
continue;
}
//At this point, we know that there are no primary paths on this channel
//Thus we must check and see if it is "free".
//we COULD use this channel, but there may be a "free" one further down.
if(cs[channelIndex].numBackups == 0) {
if(c < firstOpenChannel) {
firstOpenChannel = c;
}
break;
//continue;
}
bool disjoint = true;
//Check every connection currently on protected on the channel
for(int bup = 0; disjoint && bup < cs[channelIndex].numBackups; ++bup) {
//At this point, we know that there is at least one path protected on this channel.
//Technically, we should also know that it's not a primary path.
//for each edge of the protected connection's primary path
for(int e2 = 0; disjoint && e2 <= (*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.index; ++e2) {
//see if its the same edge as used by our primary path.
for(int e3 = 0; disjoint && e3 <= p[p_ind].index; ++e3 ) {
if((*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.edgeNums[e2] == p[p_ind].edgeNums[e3]) {
//There is a non-disjoint primary path on this channel, so it is unusable.
disjoint = false;
}
}
}
}
if(disjoint) {
//This channel is free
free = true;
}
}
if(!free) {
if(firstOpenChannel < MAX_CHANNELS) {
cost++; // must open a new channel for this edge
}else {
cost = 1000000; // edge exhausted: price the combo out
break;
}
}
}
potPathCosts[index] = cost;
}
/*
*TODO: I totally thought I made the algorithm be based on BFS, but it is in fact based on DFS.
*So REVERSE the order of the edge list. Currently, the neighbor with the lowest degree gets pushed
*to the "bottom" of the stack, so we end up computing the path with high-degree nodes in it...
*/
/*
 * Entry point: warms up the device context, zeroes every channel, loads
 * the graph, seeds the RNG and runs the GPU simulation.
 */
int main(int argc, char** argv) {
cout <<"Welcome to main\n";
// hipFree(0) forces lazy context creation now, so first-call overhead is
// not charged to the timed simulation.
hipFree(0);
for(int f = 0; f < (2*N_EDGES); ++f){
for(int g = 0; g < MAX_CHANNELS; ++g) {
channels[f][g].numBackups = 0;
channels[f][g].primary = false;
}
}
readGraphReorderEdgeList(vertexList,edgeList,reorderedEdgeList);
srand(time(NULL)); // non-deterministic connection endpoints per run
simulate_GPU(vertexList,edgeList);
//simulate(vertexList,edgeList);
return 0;
}
/*
 * Drives the GPU simulation: enumerates all simple paths per node pair,
 * prefilters edge-disjoint (primary, backup) pairs on the host, uploads
 * everything once, then admits CONNECTIONS random demands. For each demand
 * it launches filteredCostsKernel to price every surviving combo, picks
 * the cheapest on the host, commits the connection, and refreshes the
 * channel state on the device. Kernel time is accumulated with CUDA
 * events; wall time with clock().
 */
void simulate_GPU(int *vertexList, Edge *edgeList){
clock_t cpu_startTime, cpu_endTime;
double cpu_elapsedTime = 0;
float gpu_totalTime = 0;
int connectionNum = 0;
//Sizes for storage
const size_t sp_size = sizeof(SimplePath);
const size_t potPathCosts_size = (NUM_CONNECTIONS * NUM_CONNECTIONS) * sizeof(int);
const size_t ps_size = ((N_NODES*N_NODES)*NUM_CONNECTIONS)*sp_size; //Size of the entire 2D array
const size_t row_size = NUM_CONNECTIONS*sp_size; //Size of a SINGLE row in the array of SimplePaths
const size_t channels_size = ((2*N_EDGES)*MAX_CHANNELS)*sizeof(Channel);
const size_t filtered_compat_paths_size = (N_NODES*N_NODES*NUM_CONNECTIONS*NUM_CONNECTIONS*sizeof(int));
const size_t numPaths_size = N_NODES*N_NODES*sizeof(int);
const size_t numCompatPaths_size = N_NODES*N_NODES*NUM_CONNECTIONS*sizeof(int);
//Test Data (unused while src/dest are chosen randomly below)
int v1[40] = {9, 5, 6, 1, 3, 5, 4, 9, 9, 9, 7, 8, 2, 10, 3, 5, 9, 3, 2, 3, 5, 2, 3, 3, 10, 9, 10, 2, 1, 1, 3, 2, 9, 5, 4, 6, 10, 5, 0, 1};
int v2[40] = {3, 8, 4, 3, 8, 3, 7, 1, 9, 6, 0, 6, 10, 5, 8, 2, 3, 6, 5, 4, 2, 3, 9, 5, 9, 5, 6, 5, 0, 2, 5, 5, 10, 3, 9, 3, 4, 1, 10, 2};
SimplePath **ps = new SimplePath*[N_NODES * N_NODES]; //Host pointer for paths storage
SimplePath *d_ps; //Device pointer for the array of SimplePaths
int *d_potPathCosts; //Device pointer for the array of Potential Path Costs
int *h_potPathCosts; //Host pointer for the array of potential path costs.
Connection *d_cons; //Device pointer to the array of connections.
Channel *d_channels; //Device pointer for the array of channels.
int *h_filteredPaths; //Host pointer for the flattened 3D array of paths which are filtered based on compatibility
int *d_filteredPaths; //Device pointer for filtered paths
int *numPosPaths; //Host pointer for array containing the number of paths for each src/dest pair
int *h_numCompatPaths; //Host pointer for the flattened 2D array
int *d_numCompatPaths;
for(int i = 0; i < (N_NODES*N_NODES); ++i) {
ps[i] = new SimplePath[NUM_CONNECTIONS];
}
if(hipSuccess != hipMalloc((void **)&d_ps,ps_size)) {
cout << "Malloc Error\n";
}else {
cout << "allocated SimplePaths array on Device\n";
}
if(hipSuccess != hipMalloc((void **)&d_channels,channels_size)) {
cout << "Error Allocating channels on GPU\n";
}else {
cout << "Allocated Channels array on GPU\n";
}
// NOTE(review): the following allocations/copies are unchecked.
hipMalloc((void **)&d_cons,sizeof(Connection)*NUM_CONNECTIONS);
hipMalloc((void **)&d_potPathCosts,potPathCosts_size);
hipMalloc((void **)&d_filteredPaths,filtered_compat_paths_size);
hipMalloc((void **)&d_numCompatPaths,numCompatPaths_size);
hipMemcpy(d_channels,&channels,channels_size,hipMemcpyHostToDevice);
h_potPathCosts = (int *)malloc(potPathCosts_size);
h_filteredPaths = (int *)malloc(filtered_compat_paths_size);
numPosPaths = (int *)malloc(numPaths_size);
h_numCompatPaths = (int *)malloc(numCompatPaths_size);
//Compute all simple paths
for(int src = 0; src < N_NODES; ++src) {
for(int dest = 0; dest < N_NODES; ++dest) {
if(src != dest) {
int index = (src*N_NODES)+dest;
numPosPaths[index] = computeAllSimplePathsN(ps,vertexList,edgeList,src,dest,N_NODES);
}else {
numPosPaths[(src*N_NODES)+dest] = 0; //Added so numPosPaths would have a real value for cases when src=dest.
}
}
}
//Filter the compatible paths
for(int src = 0; src < N_NODES; ++src) {
for(int dest = 0; dest < N_NODES; ++dest) {
int index = (src * N_NODES) + dest;
if(src != dest){
prefilterCompatibleBackups(ps[index], h_filteredPaths, h_numCompatPaths, numPosPaths[index], src, dest);
}
}
}
//Copy Simple paths to the GPU (row by row, since ps is an array of rows)
for(int i = 0; i < (N_NODES*N_NODES); ++i) {
hipMemcpy(d_ps + (i*(NUM_CONNECTIONS)),ps[i],row_size,hipMemcpyHostToDevice);
}
//Copy filtered paths to the GPU
hipMemcpy(d_filteredPaths, h_filteredPaths, filtered_compat_paths_size, hipMemcpyHostToDevice);
//Copy compatible paths array to GPU
hipMemcpy(d_numCompatPaths, h_numCompatPaths, numCompatPaths_size, hipMemcpyHostToDevice);
//Setup components for GPU benchmarking.
hipEvent_t start, stop;
cpu_startTime = clock();
for(int c = 0; c < CONNECTIONS; ++c) {
//Attempt to allocate SOME connection onto the network
//int s = v1[connectionNum];
//int d = v2[connectionNum];
int s = 0;
int d = 0;
while(s == d) {
s = rand()%N_NODES;
d = rand()%N_NODES;
}
hipEventCreate(&start);
hipEventCreate(&stop);
int index = (s*N_NODES) + d;
//BENCHMARKING
hipEventRecord(start);
//--------Launch the Kernel---------//
// One block per primary candidate, one thread per (filtered) backup.
// NOTE(review): block size is numPosPaths[index]; must stay <= 1024.
hipLaunchKernelGGL(( filteredCostsKernel), dim3(numPosPaths[index]),dim3(numPosPaths[index]), 0, 0, d_ps, d_potPathCosts, index, d_channels, d_numCompatPaths, d_filteredPaths);
//BENCHMARKING
hipEventRecord(stop);
hipEventSynchronize(stop);
float milli = 0;
hipEventElapsedTime(&milli,start,stop);
gpu_totalTime += milli;
hipEventDestroy(start);
hipEventDestroy(stop);
hipError_t error_code = hipGetLastError();
if(hipSuccess != error_code) {
cout << "CUDA ERROR IN KERNEL: " << error_code << "\n";
cout << "ERROR: " << hipGetErrorString(error_code) << "\n";
}
//---------Copy the Results back to the host ---//
hipMemcpy(h_potPathCosts,d_potPathCosts,potPathCosts_size,hipMemcpyDeviceToHost);
//-----------Select the cheapest combo using GPU Results-----------//
int minCostGPU = 100000000;
int minPrimIndGPU = -1;
int minBackIndGPU = -1;
for(int p = 0; p < numPosPaths[index]; ++p) {
int primaryCostGPU = ps[index][p].hops;
for(int b = 0; b < h_numCompatPaths[(index*NUM_CONNECTIONS)+p]; ++b) {
int f = (p*NUM_CONNECTIONS)+b;
if(h_potPathCosts[(p*NUM_CONNECTIONS)+b] < 0) {
continue;
}
if((h_potPathCosts[(p*NUM_CONNECTIONS)+b] + primaryCostGPU) < minCostGPU) {
minCostGPU = (h_potPathCosts[(p*NUM_CONNECTIONS)+b] + primaryCostGPU);
minPrimIndGPU = p;
// Translate the filtered index back to a row-local path index.
minBackIndGPU = h_filteredPaths[(index*NUM_CONNECTIONS*NUM_CONNECTIONS)+(p*NUM_CONNECTIONS)+b]-(index*NUM_CONNECTIONS);
}
}
}
cout << "Min cost on GPU is: " << minCostGPU << "\n";
cout << "PRIM: "<<minPrimIndGPU << "\n";
// NOTE(review): if no feasible combo exists, minPrimIndGPU/minBackIndGPU
// stay -1 and the prints below index ps[index][-1] -- confirm demand
// admission can never fail, or guard this.
for(int i = 0; i <= ps[index][minPrimIndGPU].index; ++i) {
cout << (*ps[index][minPrimIndGPU].edges[i]).v1 << " -> " << (*ps[index][minPrimIndGPU].edges[i]).v2 << "\n";
}
cout << "BACK: " << minBackIndGPU << "\n";
for(int i = 0; i <= ps[index][minBackIndGPU].index; ++i) {
cout << (*ps[index][minBackIndGPU].edges[i]).v1 << " -> " << (*ps[index][minBackIndGPU].edges[i]).v2 << "\n";
}
//--------------Store the connection--------------//
cons[connectionNum].sourceNode = s;
cons[connectionNum].destNode = d;
cons[connectionNum].combinedCost = minCostGPU;
cons[connectionNum].validBackup = true;
cons[connectionNum].validPrimary = true;
cons[connectionNum].primaryPath.hops = ps[index][minPrimIndGPU].hops;
cons[connectionNum].primaryPath.index = ps[index][minPrimIndGPU].index;
cons[connectionNum].primaryPath.primary = true;
cons[connectionNum].backupPath.hops = ps[index][minBackIndGPU].hops;
cons[connectionNum].backupPath.index = ps[index][minBackIndGPU].index;
for(int p = 0; p <= ps[index][minPrimIndGPU].index; ++p) {
cons[connectionNum].primaryPath.edges[p] = ps[index][minPrimIndGPU].edges[p];
cons[connectionNum].primaryPath.freeEdges[p] = false;
cons[connectionNum].primaryPath.edgeNums[p] = ps[index][minPrimIndGPU].edgeNums[p];
}
for(int p = 0; p <= ps[index][minBackIndGPU].index; ++p) {
cons[connectionNum].backupPath.edges[p] = ps[index][minBackIndGPU].edges[p];
cons[connectionNum].backupPath.edgeNums[p] = ps[index][minBackIndGPU].edgeNums[p];
}
//Select the appropriate Channels for the selected connection
selectChannels(&cons[connectionNum],channels);
//Increase the network load
increaseLoad(&cons[connectionNum],channels,&d_cons[connectionNum]);
//NOTE: We can 100% only copy individual channels to the GPU. i.e. if only channels 3 and 41 were updated, we can copy ONLY those channels if we want to
hipMemcpy(d_channels,&channels,channels_size,hipMemcpyHostToDevice);
hipMemcpy(&d_cons[connectionNum],&cons[connectionNum],sizeof(Connection),hipMemcpyHostToDevice);
//--------------Print Network Load--------------//
for(int m = 0; m < 2*N_EDGES; ++m) {
cout << "LOAD: " << edgeList[m].v1 << " -> " << edgeList[m].v2 << ": " << edgeList[m].load << " | TP: " << edgeList[m].totalProtected << " | ";
if(edgeList[m].load > 0) {
for(int c = 0; c < edgeList[m].load; ++c) {
cout << "C" << c << ": " << channels[m][c].numBackups << " ";
if(channels[m][c].primary == true) {
cout << "P ";
}
}
}
cout << "\n";
}
connectionNum++;
}//ENDFOR
hipDeviceSynchronize();
cpu_endTime = clock();
//--------------Clean up memory--------------//
for(int i = 0; i < (N_NODES*N_NODES); ++i) {
delete[] ps[i];
}
delete[] ps;
hipFree(d_ps);
hipFree(d_potPathCosts);
hipFree(d_channels);
hipFree(d_cons);
hipFree(d_filteredPaths);
hipFree(d_numCompatPaths);
free(h_potPathCosts);
free(h_filteredPaths);
free(numPosPaths);
free(h_numCompatPaths);
cpu_elapsedTime = ((double) (cpu_endTime - cpu_startTime)/CLOCKS_PER_SEC) * 1000;
cout << "Kernel Execution took: " << gpu_totalTime << " milliseconds\n";
cout << "Total time: " << cpu_elapsedTime << " milliseconds\n";
}
// Host-side prefilter for one (src, dest) node pair: for each primary
// candidate, records the flattened index of every backup candidate that is
// edge-disjoint from it. `filteredPaths` is a flattened
// [N_NODES*N_NODES][NUM_CONNECTIONS][NUM_CONNECTIONS] array;
// `numCompatPaths` stores, per primary, how many compatible backups exist.
void prefilterCompatibleBackups(SimplePath *p, int *filteredPaths, int *numCompatPaths, int numPossiblePaths, int src, int dest) {
int numDisjoint = 0;
int numConf = 0; // conflict counter; currently written but never read
for(int pInd = 0; pInd < numPossiblePaths; ++pInd) {
for(int bInd = 0; bInd < numPossiblePaths; ++bInd) {
if(p[pInd].hops > 0 && p[bInd].hops > 0) {
bool disjoint = true;
// Pairwise edge comparison: any shared edge disqualifies the pair.
for(int e1 = 0; disjoint && e1 <= p[pInd].index; ++e1) {
for(int e2 = 0; disjoint && e2 <= p[bInd].index; ++e2) {
if(p[bInd].edgeNums[e2] == p[pInd].edgeNums[e1]) {
disjoint = false;
numConf++;
}
}
}
if(disjoint) {
int filteredIndex = (NUM_CONNECTIONS*NUM_CONNECTIONS*((src*N_NODES)+dest)) + (pInd*NUM_CONNECTIONS) + numDisjoint;
filteredPaths[filteredIndex] = (((src*N_NODES)+dest)*NUM_CONNECTIONS)+bInd;//Index for this compatible backup path.
numDisjoint++;
}
}
}
//Done checking all backups for this primary
int index = (((src*N_NODES)+dest)*NUM_CONNECTIONS) + pInd;
numCompatPaths[index] = numDisjoint;
numDisjoint = 0;
}
}
//-----------No longer using this method, since we have switched to GPU---------//
// Host-side counterpart of costsKernel: for each backup candidate i of primary
// path primaryInd, computes how many additional channels that backup would
// consume and writes it into potPathCosts[primaryInd*NUM_CONNECTIONS + i].
// Combos already marked -1 (incompatible) are skipped.
// FIX: the body previously mixed the `cs` parameter with the file-global
// `channels` array; it now uses `cs` consistently so the function operates on
// whatever channel state the caller passes in.
void computeCostForBackupsWithGPU(SimplePath *p, int *potPathCosts, int primaryInd, Channel cs[2*N_EDGES][MAX_CHANNELS]) {
    for(int i = 0; i < NUM_CONNECTIONS; ++i) {
        int pid = (primaryInd * NUM_CONNECTIONS) + i;
        if(potPathCosts[pid] == -1) {
            continue;
        }
        int cost = 0;
        for(int e = 0; e <= p[i].index; ++e) {
            bool free = false;
            int edgeNum = (*p[i].edges[e]).edgeNum;
            int firstOpenChannel = MAX_CHANNELS+1;
            for(int c = 0; !free && c < MAX_CHANNELS; ++c) {
                // Channels carrying a primary path can never be shared.
                if(cs[edgeNum][c].primary == true) {
                    continue;
                }
                // Empty channel: remember the first one as a fallback, but keep
                // scanning for a shareable ("free") channel further down.
                if(cs[edgeNum][c].numBackups == 0) {
                    if(c < firstOpenChannel) {
                        firstOpenChannel = c;
                    }
                    continue;
                }
                bool disjoint = true;
                // The channel is shareable only if every connection already
                // protected on it has a primary path that is edge-disjoint from
                // our primary path.
                for(int bup = 0; disjoint && bup < cs[edgeNum][c].numBackups; ++bup) {
                    for(int e2 = 0; disjoint && e2 <= (*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.index; ++e2) {
                        for(int e3 = 0; disjoint && e3 <= p[primaryInd].index; ++e3 ) {
                            if((*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.edges[e2] == p[primaryInd].edges[e3]) {
                                // A non-disjoint primary is protected here, so
                                // this channel is unusable for sharing.
                                disjoint = false;
                            }
                        }
                    }
                }
                if(disjoint) {
                    // This channel can be shared at no extra cost.
                    free = true;
                }
            }
            if(!free) {
                if(firstOpenChannel < MAX_CHANNELS) {
                    cost++; // must light up a brand-new channel on this edge
                }else {
                    cost = 1000000; // edge exhausted: effectively infinite cost
                    break;
                }
            }
        }
        potPathCosts[pid] = cost;
    }
}
// CPU-only simulation: admits CONNECTIONS random connection requests one at a
// time, choosing for each the primary/backup path pair with minimum combined
// cost, committing channel assignments, and increasing the network load.
// vertexList/edgeList describe the graph in compact adjacency-list form.
// Fixes in this revision:
//  - all NUM_CONNECTIONS rows of potPathInd are freed (previously only
//    numPossiblePaths rows were deleted, leaking the remainder every iteration);
//  - the cheapest-combo scan is bounded by numPossiblePaths, so it cannot read
//    past a pathCosts row that lacks a -1 sentinel;
//  - a connection is only committed when a feasible pair exists (minPrimInd
//    could previously stay -1 and index out of bounds).
void simulate(int *vertexList, Edge *edgeList){
    clock_t cpu_startTime, cpu_endTime;
    double cpu_elapsedTime = 0;
    //cpu_startTime = clock();
    //Test Data (fixed source/destination pairs; see the commented-out lines below)
    int v1[40] = {9, 5, 6, 1, 3, 5, 4, 9, 9, 9, 7, 8, 2, 10, 3, 5, 9, 3, 2, 3, 5, 2, 3, 3, 10, 9, 10, 2, 1, 1, 3, 2, 9, 5, 4, 6, 10, 5, 0, 1};
    int v2[40] = {3, 8, 4, 3, 8, 3, 7, 1, 5, 6, 0, 6, 10, 5, 8, 2, 3, 6, 5, 4, 2, 3, 9, 7, 9, 5, 6, 5, 0, 2, 5, 5, 10, 3, 9, 3, 4, 1, 10, 2};
    int connectionNum = 0;
    //We want to compute and store all possible paths between our source and destination.
    SimplePath **ps = new SimplePath*[N_NODES * N_NODES]; //Storage for paths
    int *npaths = new int[N_NODES*N_NODES];
    for(int i = 0; i < (N_NODES*N_NODES); ++i) {
        ps[i] = new SimplePath[NUM_CONNECTIONS];
    }
    //We COULD parallelize this by giving a thread a source/dest combo to compute the paths of. Potentially beneficial for large graphs.
    for(int src = 0; src < N_NODES; ++src) {
        for(int dest = 0; dest < N_NODES; ++dest) {
            if(src != dest) {
                int index = (src*N_NODES)+dest;
                npaths[index] = computeAllSimplePathsN(ps,vertexList,edgeList,src,dest,N_NODES);
            }
        }
    }
    cpu_startTime = clock();
    for(int num = 0; num < CONNECTIONS; ++num) {
        //Attempt to allocate SOME connection onto the network.
        //int s = v1[connectionNum];
        //int d = v2[connectionNum];
        int s = rand() % N_NODES;
        int d = rand() % N_NODES;
        while(s == d) {
            s = rand()%N_NODES;
            d = rand()%N_NODES;
        }
        //Allocate storage for the potential primary/backup path combos.
        int index = (s*N_NODES) + d;
        int numPossiblePaths = npaths[index];
        //potPathInd[i][j] = k where ps[index][k] is a path that is edge-disjoint from ps[index][i].
        int ** potPathInd = new int*[NUM_CONNECTIONS];
        for(int i = 0; i < NUM_CONNECTIONS; ++i) {
            potPathInd[i] = new int[NUM_CONNECTIONS];
        }
        //--------------Find all paths which are edge-disjoint from each primary--------------//
        int k = -1;
        for(int i = 0; i < numPossiblePaths; ++i) {
            k = determineCompatibleBackups(ps[index],potPathInd[i],numPossiblePaths,i);
        }
        //--------------Compute Cost for each backup path--------------//
        int ** pathCosts = new int*[numPossiblePaths];
        for(int i = 0; i < numPossiblePaths; ++i) {
            pathCosts[i] = new int[numPossiblePaths];
        }
        for(int i = 0; i < numPossiblePaths; ++i) {
            computeCostForBackups(ps[index],potPathInd[i],numPossiblePaths,i,pathCosts[i],channels);
        }
        //--------------Select cheapest connection--------------//
        int minCost = 100000000;
        int minPrimInd = -1;
        int minBackInd = -1;
        for(int p = 0; p < numPossiblePaths; ++p) {
            int backInd = 0;
            int primaryCost = ps[index][p].hops;
            //BUGFIX: bound backInd so we never scan past the allocated row when
            //every candidate backup is compatible (no -1 sentinel written).
            while(backInd < numPossiblePaths && pathCosts[p][backInd] != -1) {
                if((pathCosts[p][backInd] + primaryCost) < minCost) {
                    minCost = (pathCosts[p][backInd] + primaryCost);
                    minPrimInd = p;
                    minBackInd = backInd;
                }
                backInd++;
            }
        }
        cout << "Min cost is: " << minCost << "\n";
        //--------------Store the connection--------------//
        //BUGFIX: only commit when a feasible pair was found; previously
        //minPrimInd == -1 indexed ps[index][-1].
        if(minPrimInd >= 0 && minBackInd >= 0) {
            cons[connectionNum].sourceNode = s;
            cons[connectionNum].destNode = d;
            cons[connectionNum].combinedCost = minCost;
            cons[connectionNum].validBackup = true;
            cons[connectionNum].validPrimary = true;
            cons[connectionNum].primaryPath.hops = ps[index][minPrimInd].hops;
            cons[connectionNum].primaryPath.index = ps[index][minPrimInd].index;
            cons[connectionNum].primaryPath.primary = true;
            cons[connectionNum].backupPath.hops = ps[index][potPathInd[minPrimInd][minBackInd]].hops;
            cons[connectionNum].backupPath.index = ps[index][potPathInd[minPrimInd][minBackInd]].index;
            for(int p = 0; p <= ps[index][minPrimInd].index; ++p) {
                cons[connectionNum].primaryPath.edges[p] = ps[index][minPrimInd].edges[p];
                cons[connectionNum].primaryPath.freeEdges[p] = false;
            }
            for(int p = 0; p <= ps[index][potPathInd[minPrimInd][minBackInd]].index; ++p) {
                cons[connectionNum].backupPath.edges[p] = ps[index][potPathInd[minPrimInd][minBackInd]].edges[p];
            }
            //Select Channels
            selectChannels(&cons[connectionNum],channels);
            //Increase the network load
            increaseLoad(&cons[connectionNum],channels);
        }
        //--------------Print Network Load--------------//
        for(int m = 0; m < 2*N_EDGES; ++m) {
            cout << "LOAD: " << edgeList[m].v1 << " -> " << edgeList[m].v2 << ": " << edgeList[m].load << " | TP: " << edgeList[m].totalProtected << " | ";
            if(edgeList[m].load > 0) {
                for(int c = 0; c < edgeList[m].load; ++c) {
                    cout << "C" << c << ": " << channels[m][c].numBackups << " ";
                    if(channels[m][c].primary == true) {
                        cout << "P ";
                    }
                }
            }
            cout << "\n";
        }
        //--------------Clean up memory--------------//
        //BUGFIX: all NUM_CONNECTIONS rows were allocated above, so all must be
        //freed (previously only numPossiblePaths rows were deleted -> leak).
        for(int i = 0; i < NUM_CONNECTIONS; ++i) {
            delete[] potPathInd[i];
        }
        delete[] potPathInd;
        for(int i = 0; i < numPossiblePaths; ++i) {
            delete[] pathCosts[i];
        }
        delete[] pathCosts;
        connectionNum++;
    }//end loop
    cpu_endTime = clock();
    for(int i = 0; i < (N_NODES*N_NODES); ++i) {
        delete[] ps[i];
    }
    delete[] ps;
    delete[] npaths;
    cout << "ps and npaths deleted\n";
    cpu_elapsedTime = ((double)(cpu_endTime-cpu_startTime)/CLOCKS_PER_SEC)*1000;
    cout << "CPU Total Elapsed Time: " << cpu_elapsedTime << "\n";
}
// Commits an admitted connection onto the network (GPU-aware overload).
// Mirrors increaseLoad(Connection*, Channel[][MAX_CHANNELS]) below, but
// additionally stores d_con -- the device-side pointer to the same Connection --
// in d_backupsOnChannel so kernels can follow channel -> connection links.
// Precondition: selectChannels() has already filled channelNum[]/freeEdges[].
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS], Connection *d_con) {
if((*connection).primaryPath.index < 0) {
cout << "Primary Path DNE?\n";
return;
}
//Increment the network load; put the backup on the channels
//Here we are incrementing the network load for the PRIMARY PATH
for(int i = 0; i <= (*connection).primaryPath.index; ++i) {
//Every edge in the primary path gets its load increased.
//The selected channel becomes a dedicated primary channel; slot 0 of its
//occupancy list points back at the owning connection (host and device copies).
channels[(*(*connection).primaryPath.edges[i]).edgeNum][(*connection).primaryPath.channelNum[i]].primary = true;
channels[(*(*connection).primaryPath.edges[i]).edgeNum][(*connection).primaryPath.channelNum[i]].backupsOnChannel[0] = connection;
channels[(*(*connection).primaryPath.edges[i]).edgeNum][(*connection).primaryPath.channelNum[i]].d_backupsOnChannel[0] = d_con;
channels[(*(*connection).primaryPath.edges[i]).edgeNum][(*connection).primaryPath.channelNum[i]].numBackups += 1;
(*(*connection).primaryPath.edges[i]).load += 1;
(*(*connection).primaryPath.edges[i]).totalProtected += 1;
}
//Here we are increasing the network load for the BACKUP PATH
for(int i = 0; i <= (*connection).backupPath.index; ++i) {
//Temp
Edge *e = (*connection).backupPath.edges[i];
int cNum = (*connection).backupPath.channelNum[i];
//first path to use this channel, or this is not a free edge for the backup path.
//if(channels[(*e).edgeNum][cNum].numBackups == 0 || (*(*connection).backupPath).freeEdges[i] == false) {
//Only non-"free" edges consume a new unit of load; a free edge shares a
//channel that is already counted in the edge's load.
if((*connection).backupPath.freeEdges[i] == false) {
(*e).load += 1;
}
//Marks that the connection is protected on this channel.
int en = (*e).edgeNum;
int numbs = channels[en][cNum].numBackups;
channels[en][cNum].primary = false;
channels[en][cNum].backupsOnChannel[numbs] = connection;
channels[en][cNum].d_backupsOnChannel[numbs] = d_con;
channels[en][cNum].numBackups += 1;
(*e).totalProtected +=1;
}
}
// Commits an admitted connection onto the network (CPU-only overload).
// Primary-path edges each gain a dedicated channel and a unit of load; the
// backup path registers itself on its chosen channels, consuming extra load
// only on edges that were not marked "free" by selectChannels().
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS]) {
    if (connection->primaryPath.index < 0) {
        cout << "Primary Path DNE?\n";
        return;
    }
    // Primary path: every edge carries this connection on its own channel.
    for (int i = 0; i <= connection->primaryPath.index; ++i) {
        Edge *edge = connection->primaryPath.edges[i];
        int chanIdx = connection->primaryPath.channelNum[i];
        Channel &slot = channels[edge->edgeNum][chanIdx];
        slot.primary = true;
        slot.backupsOnChannel[0] = connection;
        slot.numBackups += 1;
        edge->load += 1;
        edge->totalProtected += 1;
    }
    // Backup path: register protection on each chosen channel; only non-free
    // edges consume a fresh unit of load.
    for (int i = 0; i <= connection->backupPath.index; ++i) {
        Edge *edge = connection->backupPath.edges[i];
        int chanIdx = connection->backupPath.channelNum[i];
        if (connection->backupPath.freeEdges[i] == false) {
            edge->load += 1;
        }
        Channel &slot = channels[edge->edgeNum][chanIdx];
        int existing = slot.numBackups;
        slot.primary = false;
        slot.backupsOnChannel[existing] = connection;
        slot.numBackups += 1;
        edge->totalProtected += 1;
    }
}
//TODO: This method contains a lot of redundant code that is also in computeCostForBackups. Consider combining.
//I wanted to modularize the code as much as possible this time around, which is why there's so much redundancy in this method.
// Picks a concrete channel on every edge of the connection's primary and backup
// paths, writing the results into channelNum[] (and freeEdges[] for the backup).
// Primary edges take the first completely empty channel; backup edges prefer a
// shareable channel (one whose protected connections all have primaries
// edge-disjoint from ours) and otherwise fall back to the first empty channel.
// NOTE(review): if an edge has no channel with numBackups == 0, the primary's
// channelNum[p] is never assigned, and a backup's fallback assigns
// firstOpenChannel == MAX_CHANNELS+1 (out of range) -- confirm callers only run
// this on paths whose edges were pre-filtered by load < MAX_CHANNELS.
// NOTE(review): freeEdges[e] is read below without being set false first; this
// appears to rely on cons[] being a zero-initialized global -- verify.
void selectChannels(Connection *c, Channel chan[2*N_EDGES][MAX_CHANNELS]) {
/*
cout << "prim\n";
for(int i = 0; i <= (*(*c).primaryPath).index; ++i) {
cout << (*(*(*c).primaryPath).edges[i]).v1 << " -> " << (*(*(*c).primaryPath).edges[i]).v2 << "\n";
}
cout << "back\n";
for(int i = 0; i <= (*(*c).backupPath).index; ++i) {
cout << (*(*(*c).backupPath).edges[i]).v1 << " -> " << (*(*(*c).backupPath).edges[i]).v2 << "\n";
}
*/
int edgeNum = -1;
//Select Primary path channels: first channel with no occupants wins.
for(int p = 0; p <= (*c).primaryPath.index; ++p){
edgeNum = (*(*c).primaryPath.edges[p]).edgeNum;
bool allSet = false;
for(int ch = 0; !allSet && ch < MAX_CHANNELS; ++ch) {
if(chan[edgeNum][ch].numBackups == 0) {
allSet = true;
(*c).primaryPath.channelNum[p] = ch;
}
}
}
//Select Backup path channels.
for(int e = 0; e <= (*c).backupPath.index; ++e) {
bool free = false;
edgeNum = (*(*c).backupPath.edges[e]).edgeNum;
int firstOpenChannel = MAX_CHANNELS+1;
for(int ch = 0; !free && ch < MAX_CHANNELS; ++ch) {
//Channels carrying a primary path can never be shared.
if(chan[edgeNum][ch].primary == true) {
continue;
}
//At this point, we know that there are no primary paths on this channel
//Thus we must check and see if it is "free".
//Empty channel: remember it and stop scanning. (Unlike computeCostForBackups,
//which keeps scanning via `continue`, the `break` here accepts the first
//empty channel rather than hunting for a shareable one further down.)
if(chan[edgeNum][ch].numBackups == 0) {
if(ch < firstOpenChannel) {
firstOpenChannel = ch;
}
//continue;
break;
}
bool disjoint = true;
//Check every connection currently protected on the channel.
for(int bup = 0; disjoint && bup < chan[edgeNum][ch].numBackups; ++bup) {
//At this point, we know that there is at least one path protected on this channel.
//Technically, we should also know that it's not a primary path.
//For each edge of the protected connection's primary path...
for(int e2 = 0; disjoint && e2 <= (*chan[edgeNum][ch].backupsOnChannel[bup]).primaryPath.index; ++e2) {
//...see if it is the same edge as used by our primary path.
for(int e3 = 0; disjoint && e3 <= (*c).primaryPath.index; ++e3 ) {
if((*chan[edgeNum][ch].backupsOnChannel[bup]).primaryPath.edges[e2] == (*c).primaryPath.edges[e3]) {
//There is a non-disjoint primary path on this channel, so it is unusable.
//goto CHANNEL_LOOP_END;
disjoint = false;
}
}
}
}
if(disjoint) {
//This channel is shareable: use it at zero extra load.
free = true;
(*c).backupPath.channelNum[e] = ch;
(*c).backupPath.freeEdges[e] = true;
}
}
//No shareable channel found: fall back to the first empty channel.
if((*c).backupPath.freeEdges[e] == false) {
(*c).backupPath.channelNum[e] = firstOpenChannel;
}
}
//cout << "all set?\n";
}
// Computes the channel cost of each candidate backup path for primary path
// p[primaryInd]. potPathInd holds the candidate indices (terminated by -1, as
// produced by determineCompatibleBackups); pathCosts[i] receives the cost of
// candidate potPathInd[i], or -1 at the terminator position.
// A backup edge costs 0 when a shareable channel exists (all connections already
// protected there have primaries edge-disjoint from ours), 1 when a fresh
// channel must be used, and 1000000 (infeasible) when the edge is exhausted.
// FIX: the body previously mixed the `cs` parameter with the file-global
// `channels` array; it now uses `cs` consistently.
void computeCostForBackups(SimplePath *p, int *potPathInd, int numPossiblePaths, int primaryInd, int *pathCosts, Channel cs[2*N_EDGES][MAX_CHANNELS]) {
    for(int i = 0; i < numPossiblePaths; ++i) {
        if(potPathInd[i] == -1) {
            pathCosts[i] = -1; // propagate the terminator and stop
            break;
        }
        int pid = potPathInd[i];
        int cost = 0;
        for(int e = 0; e <= p[pid].index; ++e) {
            bool free = false;
            int edgeNum = (*p[pid].edges[e]).edgeNum;
            int firstOpenChannel = MAX_CHANNELS+1;
            for(int c = 0; !free && c < MAX_CHANNELS; ++c) {
                // Channels carrying a primary path can never be shared.
                if(cs[edgeNum][c].primary == true) {
                    continue;
                }
                // Empty channel: remember the first one as a fallback, but keep
                // scanning for a shareable ("free") channel further down.
                if(cs[edgeNum][c].numBackups == 0) {
                    if(c < firstOpenChannel) {
                        firstOpenChannel = c;
                    }
                    continue;
                }
                bool disjoint = true;
                // Shareable only if every connection protected on this channel
                // has a primary path edge-disjoint from our primary path.
                for(int bup = 0; disjoint && bup < cs[edgeNum][c].numBackups; ++bup) {
                    for(int e2 = 0; disjoint && e2 <= (*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.index; ++e2) {
                        for(int e3 = 0; disjoint && e3 <= p[primaryInd].index; ++e3 ) {
                            if((*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.edges[e2] == p[primaryInd].edges[e3]) {
                                // A non-disjoint primary is protected here, so
                                // this channel is unusable for sharing.
                                disjoint = false;
                            }
                        }
                    }
                }
                if(disjoint) {
                    // This channel can be shared at no extra cost.
                    free = true;
                }
            }
            if(!free) {
                if(firstOpenChannel < MAX_CHANNELS) {
                    cost++; // must light up a brand-new channel on this edge
                }else {
                    cost = 1000000; // edge exhausted: effectively infinite cost
                    break;
                }
            }
        }
        pathCosts[i] = cost;
    }
}
//TODO: There's some sketchiness going on with numPossiblePaths vs NUM_CONNECTIONS.
// Finds every simple path in p[0..NUM_CONNECTIONS) that is edge-disjoint from
// p[pInd]. Writes the indices of compatible backups into potPathInd, terminated
// by -1, and returns the count. Slots with hops <= 0 or index < 0 are unused
// enumeration leftovers and are skipped.
// FIX: the -1 terminator is now only written when it fits inside the
// potPathInd row (NUM_CONNECTIONS entries); previously a fully-compatible set
// wrote one element past the end of the array.
int determineCompatibleBackups(SimplePath *p, int *potPathInd, int numPossiblePaths, int pInd) {
    int numDisjoint = 0;
    int numConf = 0;
    //cout << "SRC: " << p[pInd].sourceNode << " DEST: " << p[pInd].destNode << "\n";
    //First pass checks to see which simple paths are disjoint from the primary path.
    for(int i = 0; i < NUM_CONNECTIONS; ++i) {
        if(p[i].hops <= 0 || p[i].index < 0|| p[pInd].hops <= 0 || p[pInd].index < 0){numConf++; continue;}
        bool disjoint = true;
        //Check each edge to make sure the two paths are disjoint.
        for(int e1 = 0; disjoint && e1 <= p[pInd].index; ++e1) {
            for(int e2 = 0; disjoint && e2 <= p[i].index; ++e2) {
                if(p[i].edgeNums[e2] == p[pInd].edgeNums[e1]) {
                    disjoint = false;
                    numConf++;
                }
            }
        }
        if(disjoint) {
            potPathInd[numDisjoint] = i;
            numDisjoint++;
        }
    }
    //Mark the end of the array (guarded so a full row cannot overflow).
    if(numDisjoint < NUM_CONNECTIONS) {
        potPathInd[numDisjoint] = -1;
    }
    //cout << "disjoint: " << numDisjoint << " out of " << numPossiblePaths <<"\n";
    //cout << "conflicts: " << numConf << "\n";
    return numDisjoint;
}
// Enumerates simple paths from sourceNode to destNode with an iterative DFS,
// storing each discovered path in ps[sourceNode*N_NODES + destNode][k].
// `hops` bounds the search depth (callers pass N_NODES). Edges whose load has
// reached MAX_CHANNELS are treated as unusable and skipped. Returns the number
// of paths found; slots at and beyond that count get hops = 0 so later passes
// recognize them as invalid.
// NOTE(review): nothing bounds currentPath against NUM_CONNECTIONS, so a graph
// with more than NUM_CONNECTIONS simple paths would overrun ps[index] --
// confirm the topology keeps the path count below that limit.
int computeAllSimplePathsN(SimplePath **ps, int *vertexList, Edge *edgeList, int sourceNode, int destNode, int hops) {
int index = (sourceNode * N_NODES) + destNode;
//initialize arrays
int visited[N_NODES]; //visited[i] is 1 if node i has been visited on this path, 0 otherwise.
int currentPath = 0;
//edgeListIndex[i] contains the index into edgeList[] (aka the compact adj list) for node i.
int edgeListIndex[N_NODES];
ps[index][currentPath].index = 0;
//Initialize our search components
for(int i = 0; i < N_NODES; ++i) {
visited[i] = 0;
edgeListIndex[i] = vertexList[i];
}
stack <int> st;
int currentNode;
int neighbor;
int currentHop = 1;
st.push(sourceNode);
visited[sourceNode] = 1;
while(st.size() > 0) {
//use loopCond to get to the beginning of the while loop from inside the for loop.
bool loopCond = true;
currentNode = st.top();
//for each neighbor of currentNode
for(; loopCond == true && edgeListIndex[currentNode] < vertexList[currentNode+1]; ++edgeListIndex[currentNode]) {
neighbor = edgeList[edgeListIndex[currentNode]].v2;
//If we're too far away from our source node, backtrack.
if(currentHop >= hops) {
break;
}
//Fully loaded edges cannot carry another channel, so skip them.
if(edgeList[edgeListIndex[currentNode]].load == MAX_CHANNELS) {
continue;
}
//If our neighbor is the desired node, AND we're at the correct path length, save this path!
if(neighbor == destNode && currentHop < hops) {
ps[index][currentPath].edges[ps[index][currentPath].index] = &edgeList[edgeListIndex[currentNode]];
ps[index][currentPath].edgeNums[ps[index][currentPath].index] = edgeList[edgeListIndex[currentNode]].edgeNum;
ps[index][currentPath].sourceNode = sourceNode;
ps[index][currentPath].destNode = destNode;
ps[index][currentPath].hops = currentHop;
//Copy the whole path up until the dest node to the next path in the array.
//Note that we don't copy the COST from the current primary path, as the cost is computed
//independently for each primary path.
ps[index][currentPath+1].sourceNode = sourceNode;
ps[index][currentPath+1].destNode = destNode;
ps[index][currentPath+1].hops = currentHop;
ps[index][currentPath+1].index = ps[index][currentPath].index-1;
for(int i = 0; i < ps[index][currentPath].index; ++i) {
ps[index][currentPath+1].edges[i] = ps[index][currentPath].edges[i];
ps[index][currentPath+1].edgeNums[i] = ps[index][currentPath].edgeNums[i];
}
currentPath += 1;
ps[index][currentPath].index += 1;
++edgeListIndex[currentNode];
//
loopCond = false;
break;
}
if(!visited[neighbor]) {
//Extend the current partial path along this edge and descend one level.
ps[index][currentPath].edges[ps[index][currentPath].index] = &edgeList[edgeListIndex[currentNode]];
ps[index][currentPath].edgeNums[ps[index][currentPath].index] = edgeList[edgeListIndex[currentNode]].edgeNum;
ps[index][currentPath].index += 1;
st.push(neighbor);
visited[neighbor] = 1;
currentHop++;
//continue the while loop, but increment the ELI first.
++edgeListIndex[currentNode];
loopCond = false;
break;
}
}
if(loopCond) {
//Exhausted every neighbor of currentNode: backtrack one level.
currentHop--;
//Once we've visited all of this node's neighbors, we reset it so that a
//different path involving this node can be explored.
visited[currentNode] = 0;
ps[index][currentPath].index -= 1;
edgeListIndex[currentNode] = vertexList[currentNode];
st.pop();
}
}
//Last path is invalid
ps[index][currentPath].hops = 0;
//Mark every unused slot invalid so downstream filters can skip them.
for(int j = currentPath; j < NUM_CONNECTIONS; ++j) {
ps[index][j].hops = 0;
}
return currentPath;
}
// Builds the compact adjacency-list representation of the graph from the
// global base_edges table: vertexList[i] is the offset of node i's neighbors in
// compactEdgeList (CSR-style, with vertexList[N_NODES] == 2*N_EDGES), and each
// undirected base edge is stored once per direction. Then produces
// reorderedEdgeList, in which each node's neighbors are sorted by ascending
// neighbor degree via a repeated minimum-selection scan.
// NOTE(review): only v1/v2/load are copied into reorderedEdgeList; edgeNum and
// totalProtected are left unset there -- confirm callers never read them.
void readGraphReorderEdgeList(int vertexList[],Edge compactEdgeList[2*N_EDGES],Edge reorderedEdgeList[2*N_NODES]) {
//cout << "Beginning read\n";
//TODO: We def don't need this extra array... please revise.
//Dense adjacency matrix used only as a temporary while building the CSR form.
int edgeList[N_NODES][N_NODES];
for(int i = 0; i < N_NODES; ++i) {
for(int j = 0; j < N_NODES; ++j) {
edgeList[i][j] = 0;
}
}
for(int i = 0; i < N_EDGES; ++i) {
edgeList[base_edges[i][0]][base_edges[i][1]] = 1;
edgeList[base_edges[i][1]][base_edges[i][0]] = 1;
}
int vDegree[N_NODES];
int counter = 0;
//Walk the adjacency matrix row by row, emitting one directed Edge per entry.
for(int i = 0; i < N_NODES; ++i) {
vertexList[i] = counter;
for(int j = 0; j < N_NODES; ++j) {
if(edgeList[i][j] != 0) {
compactEdgeList[counter].v1 = i;
compactEdgeList[counter].v2 = j;
compactEdgeList[counter].load = 0;
compactEdgeList[counter].totalProtected = 0;
compactEdgeList[counter].edgeNum = counter;
//for(int x = 0; x < MAX_CHANNELS; ++x) {
// compactEdgeList[counter].channels[x].numBackups = 0;
//}
counter++;
}
}
vDegree[i] = counter - vertexList[i];
//cout << i << ": " << vDegree[i] << "\n";
}
vertexList[N_NODES] = 2*N_EDGES;
//This successfully reorders the edgelist based on the degree of the neighbor.
//TODO: make this sorting algorithm faster... like WAY faster.
for(int i = 0; i < N_NODES; ++i) {
int startInd = vertexList[i];
int endInd = vertexList[i+1];
//[startInd,endInd)
int reorderedInd = startInd;
while(reorderedInd < endInd) {
int min = startInd;
int minVal = 66666; //sentinel: assumes every degree is below this value
//Find the "smallest" neighbor of this node (<= means ties pick the LAST minimal neighbor).
for(int j = startInd; j < endInd; ++j) {
bool isReordered = false;
//Check to see if this node is already in our reordered list.
for(int k = startInd; k < reorderedInd; ++k) {
if(reorderedEdgeList[k].v2 == compactEdgeList[j].v2) {
isReordered = true;
break;
}
}
//if its not in our reordered list and it qualifies as the minimum neighbor.
if(isReordered == false && vDegree[compactEdgeList[j].v2] <= minVal) {
min = j;
minVal = vDegree[compactEdgeList[j].v2];
}
}
reorderedEdgeList[reorderedInd].v1 = compactEdgeList[min].v1;
reorderedEdgeList[reorderedInd].v2 = compactEdgeList[min].v2;
reorderedEdgeList[reorderedInd].load = 0;
reorderedInd++;
}
}
}
| 7389569f3d9e68e0e7f1a70f76850c1fc11eba33.cu | /*
* File: maingpu.cu
* Author: jjbillings
*
* Created on October 16, 2016, 9:09 PM
*/
#include<cstdlib>
#include<stdio.h>
#include<queue>
#include<stack>
#include<iostream>
#include<fstream>
#include<ctime>
#include"nets.h"
using namespace std;
#define NUM_CONNECTIONS 1000
#define CONNECTIONS 1000
#define MAX_CHANNELS 500
#define SAMPLES 1
struct SimplePath;
struct Path;
struct Edge;
struct Connection;
struct Channel;
// Occupancy record for one wavelength/channel on one directed edge.
struct Channel{
bool primary; //is this channel used for a primary path?
int numBackups; //number of connections protected (or carried) on this channel
Connection *backupsOnChannel[NUM_CONNECTIONS];//host-side pointers to the connections on this channel. Realistically, there will be far fewer than NUM_CONNECTIONS
Connection *d_backupsOnChannel[NUM_CONNECTIONS];//device-side pointers to the same connections, dereferenced by the kernels
};
// One directed edge of the network graph (each undirected edge is stored twice).
struct Edge {
int edgeNum; //index of this edge within the compact edge list
int v1; //source endpoint
int v2; //destination endpoint
int load; //load <= MAX_CHANNELS. Also, load is the sum of the primary AND backup paths using it.
int totalProtected; //running count of connections ever protected across this edge
};
// A candidate simple path produced by computeAllSimplePathsN.
// `index` is the position of the next edge slot (so edges[0..index] are valid
// for a saved path); hops <= 0 marks an unused/invalid slot.
struct SimplePath {
int sourceNode;
int destNode;
int hops; //path length in edges; 0 means this slot is unused
int index; //highest valid position in edges[]/edgeNums[]
int edgeNums[N_NODES]; //edge numbers, mirrored for device-side disjointness checks
Edge *edges[N_NODES]; //host pointers into the global edge list
// Initializes a slot to the "unused" state recognized by the filters.
SimplePath() {
for(int i = 0; i < N_NODES; ++i) {
edgeNums[i] = -1;
edges[i] = 0;
}
sourceNode = -1;
destNode = -1;
hops = -1;
index = -1;
};
};
// A committed (selected) path, with its channel assignments per edge.
struct Path {
int sourceNode;
int destNode;
int hops; //path length in edges
int index; //highest valid position in the per-edge arrays
int cost;
//Every path that uses a particular edge just has a reference to it (not a copy), so they can each manipulate it.
Edge *edges[N_NODES];
bool freeEdges[N_NODES]; //whether or not that edge has a cost of 0 (shares an already-loaded channel)
int channelNum[N_NODES]; //Channel number for each edge that it uses
int edgeNums[N_NODES]; //edge numbers, mirrored for device-side comparisons
bool primary; //true when this Path is serving as a primary path
bool active; //NOTE(review): not referenced in the visible code -- confirm use elsewhere
};
// One admitted source->destination connection: a primary path plus an
// edge-disjoint backup path and their combined cost.
struct Connection {
int sourceNode;
int destNode;
int combinedCost; //primary hops + backup channel cost at admission time
bool validBackup;
bool validPrimary;
Path backupPath;
Path primaryPath;
};
void readGraphReorderEdgeList(int vertexList[],Edge compactEdgeList[2*N_EDGES],Edge reorderedEdgeList[2*N_NODES]);
int computeAllSimplePathsN(SimplePath **ps, int *vertexList, Edge *edgeList, int sourceNode, int destNode, int hops);
void simulate(int *vertexList, Edge *edgeList);
void simulate_GPU(int *vertexList, Edge *edgeList);
void computeCostForBackupsWithGPU(SimplePath *p, int *potPathCosts, int primaryInd, Channel cs[2*N_EDGES][MAX_CHANNELS]);
int determineCompatibleBackups(SimplePath *p, int *potPathInd, int numPossiblePaths, int pInd);
void computeCostForBackups(SimplePath *p, int *potPathInd, int numPotPaths, int backupIndex, int *pathCosts,Channel cs[2*N_EDGES][MAX_CHANNELS]);
void selectChannels(Connection *c, Channel chan[2*N_EDGES][MAX_CHANNELS]);
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS], Connection *d_con);
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS]);
void prefilterCompatibleBackups(SimplePath *p, int *filteredPaths, int *numCompatPaths, int numPossiblePaths, int src, int dest);
//--------Global simulation state--------//
int vertexList[N_NODES+1]; //CSR-style offsets into edgeList; vertexList[N_NODES] == 2*N_EDGES
Edge edgeList[2*N_EDGES]; //compact adjacency list (each undirected edge stored twice)
Edge reorderedEdgeList[2*N_EDGES]; //edgeList with each node's neighbors sorted by neighbor degree
Connection cons[NUM_CONNECTIONS]; //one slot per admitted connection, indexed by admission order
Channel channels[2*N_EDGES][MAX_CHANNELS]; //per-edge channel occupancy table
//-----------Kernel for Determining which Backups are compatible with which Primaries. WORKING---------//
// Launch layout: one block per candidate primary path (blockIdx.x), one thread
// per candidate backup path (threadIdx.x); conInd selects the source/dest pair.
// Writes 1 (edge-disjoint, usable) or -1 (incompatible or invalid slot) into
// potPathCosts[blockIdx.x * NUM_CONNECTIONS + threadIdx.x]. No bounds guard:
// assumes gridDim.x and blockDim.x both equal NUM_CONNECTIONS.
__global__ void determineCompatibleBackups(SimplePath *ps, int *potPathCosts,int conInd){
    const int primary = (conInd * NUM_CONNECTIONS) + blockIdx.x;
    const int backup  = (conInd * NUM_CONNECTIONS) + threadIdx.x;
    const int outSlot = (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x;

    int verdict = -1; // incompatible until proven edge-disjoint
    if (ps[primary].hops > 0 && ps[backup].hops > 0) {
        // The pair conflicts as soon as any edge number appears in both paths.
        bool sharesEdge = false;
        for (int e1 = 0; !sharesEdge && e1 <= ps[primary].index; ++e1) {
            for (int e2 = 0; !sharesEdge && e2 <= ps[backup].index; ++e2) {
                if (ps[primary].edgeNums[e1] == ps[backup].edgeNums[e2]) {
                    sharesEdge = true;
                }
            }
        }
        if (!sharesEdge) {
            verdict = 1;
        }
    }
    potPathCosts[outSlot] = verdict;
}
//---------Kernel for computing the cost of each primary/backup combo. WORKING -------//
// Launch layout: one block per candidate primary path (blockIdx.x), one thread
// per candidate backup path (threadIdx.x); both dimensions are expected to equal
// NUM_CONNECTIONS -- there is no bounds guard. cs is the flattened
// [2*N_EDGES][MAX_CHANNELS] channel table resident on the device.
// On entry potPathCosts holds -1 for incompatible combos (from
// determineCompatibleBackups); compatible combos receive the backup's channel
// cost: 0 per shareable edge, +1 per fresh channel, 1000000 when an edge is full.
__global__ void costsKernel(SimplePath *p, int *potPathCosts, int conInd , Channel *cs) {
int p_ind = (conInd * NUM_CONNECTIONS) + blockIdx.x;
int b_ind = (conInd * NUM_CONNECTIONS) + threadIdx.x;
int index = (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x;
//If we already know that this combo is unusable, just quit.
if(potPathCosts[index] == -1) {
return;
}
int cost = 0;
for(int e = 0; e <= p[b_ind].index; ++e) {
bool free = false;
int edgeNum = p[b_ind].edgeNums[e];
int firstOpenChannel = MAX_CHANNELS+1;
for(int c = 0; !free && c < MAX_CHANNELS; ++c) {
int channelIndex = (edgeNum * MAX_CHANNELS)+c;
//Channels carrying a primary path can never be shared.
if(cs[channelIndex].primary == true) {
continue;
}
//At this point, we know that there are no primary paths on this channel
//Thus we must check and see if it is "free".
//we COULD use this channel, but there may be a "free" one further down.
if(cs[channelIndex].numBackups == 0) {
if(c < firstOpenChannel) {
firstOpenChannel = c;
}
continue;
}
bool disjoint = true;
//Check every connection currently protected on the channel.
for(int bup = 0; disjoint && bup < cs[channelIndex].numBackups; ++bup) {
//At this point, we know that there is at least one path protected on this channel.
//Technically, we should also know that it's not a primary path.
//for each edge of the protected connection's primary path
for(int e2 = 0; disjoint && e2 <= (*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.index; ++e2) {
//see if its the same edge as used by our primary path.
for(int e3 = 0; disjoint && e3 <= p[p_ind].index; ++e3 ) {
if((*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.edgeNums[e2] == p[p_ind].edgeNums[e3]) {
//There is a non-disjoint primary path on this channel, so it is unusable.
disjoint = false;
}
}
}
}
if(disjoint) {
//This channel is shareable at no extra cost.
free = true;
}
}
if(!free) {
if(firstOpenChannel < MAX_CHANNELS) {
cost++;
}else {
cost = 1000000;
break;
}
}
}
potPathCosts[index] = cost;
}
//---------Kernel for computing the cost of each primary/backup combo using the list of filtered paths. WORKING-------//
// Like costsKernel, but consumes the prefiltered compatibility lists instead of
// the full -1-marked matrix: threadIdx.x indexes into the compatible-backup list
// for primary blockIdx.x (built by prefilterCompatibleBackups), and threads past
// numCompatPaths[p_ind] exit immediately. filteredPaths entries are GLOBAL path
// indices, so b_ind indexes p directly.
// NOTE(review): the empty-channel case uses `break` (accept the first empty
// channel) where costsKernel uses `continue` (keep hunting for a shareable one);
// the two kernels can therefore produce different costs -- confirm intended.
__global__ void filteredCostsKernel(SimplePath *p, int *potPathCosts, int conInd , Channel *cs, int *numCompatPaths, int *filteredPaths) {
int p_ind = (conInd * NUM_CONNECTIONS) + blockIdx.x;
int index = (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x;
//Threads beyond this primary's compatible-backup count have no work.
if(threadIdx.x >= numCompatPaths[p_ind]) {
return;
}
int b_ind = filteredPaths[(conInd * NUM_CONNECTIONS * NUM_CONNECTIONS) + (blockIdx.x * NUM_CONNECTIONS) + threadIdx.x];
int cost = 0;
for(int e = 0; e <= p[b_ind].index; ++e) {
bool free = false;
int edgeNum = p[b_ind].edgeNums[e];
int firstOpenChannel = MAX_CHANNELS+1;
for(int c = 0; !free && c < MAX_CHANNELS; ++c) {
int channelIndex = (edgeNum * MAX_CHANNELS)+c;
//Channels carrying a primary path can never be shared.
if(cs[channelIndex].primary == true) {
continue;
}
//At this point, we know that there are no primary paths on this channel
//Thus we must check and see if it is "free".
//we COULD use this channel, but there may be a "free" one further down.
if(cs[channelIndex].numBackups == 0) {
if(c < firstOpenChannel) {
firstOpenChannel = c;
}
break;
//continue;
}
bool disjoint = true;
//Check every connection currently protected on the channel.
for(int bup = 0; disjoint && bup < cs[channelIndex].numBackups; ++bup) {
//At this point, we know that there is at least one path protected on this channel.
//Technically, we should also know that it's not a primary path.
//for each edge of the protected connection's primary path
for(int e2 = 0; disjoint && e2 <= (*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.index; ++e2) {
//see if its the same edge as used by our primary path.
for(int e3 = 0; disjoint && e3 <= p[p_ind].index; ++e3 ) {
if((*cs[channelIndex].d_backupsOnChannel[bup]).primaryPath.edgeNums[e2] == p[p_ind].edgeNums[e3]) {
//There is a non-disjoint primary path on this channel, so it is unusable.
disjoint = false;
}
}
}
}
if(disjoint) {
//This channel is shareable at no extra cost.
free = true;
}
}
if(!free) {
if(firstOpenChannel < MAX_CHANNELS) {
cost++;
}else {
cost = 1000000;
break;
}
}
}
potPathCosts[index] = cost;
}
/*
 * TODO: The enumeration was intended to be BFS-based but is in fact DFS-based,
 * so the neighbor ordering should be REVERSED: the lowest-degree neighbor is
 * currently pushed deepest, yielding paths through high-degree nodes first.
 */
// Entry point: initializes the channel table, loads the graph, seeds the RNG,
// and runs the GPU simulation.
int main(int argc, char** argv) {
    cout <<"Welcome to main\n";
    // Touch the CUDA runtime so context creation happens before any timing.
    cudaFree(0);
    // Reset every channel on every directed edge to an unused state.
    for (int edge = 0; edge < (2 * N_EDGES); ++edge) {
        for (int ch = 0; ch < MAX_CHANNELS; ++ch) {
            channels[edge][ch].numBackups = 0;
            channels[edge][ch].primary = false;
        }
    }
    readGraphReorderEdgeList(vertexList, edgeList, reorderedEdgeList);
    srand(time(NULL));
    simulate_GPU(vertexList, edgeList);
    //simulate(vertexList,edgeList);
    return 0;
}
// Runs the connection-allocation simulation with the backup-cost search
// offloaded to the GPU (filteredCostsKernel). Precomputes and prefilters all
// simple paths on the host, copies them to the device once, then for each of
// CONNECTIONS random src/dest requests launches the kernel, selects the
// cheapest disjoint primary/backup pair from the returned costs, records the
// connection, and mirrors the updated channel state back to the device.
// NOTE(review): several cudaMalloc/cudaMemcpy results are unchecked, and
// minPrimIndGPU/minBackIndGPU can remain -1 if no feasible pair exists,
// which would index ps[index][-1] below — assumes at least one disjoint
// pair always exists for the chosen (s,d); TODO confirm.
void simulate_GPU(int *vertexList, Edge *edgeList){
clock_t cpu_startTime, cpu_endTime;
double cpu_elapsedTime = 0;
float gpu_totalTime = 0;
int connectionNum = 0;
//Sizes for storage
const size_t sp_size = sizeof(SimplePath);
const size_t potPathCosts_size = (NUM_CONNECTIONS * NUM_CONNECTIONS) * sizeof(int);
const size_t ps_size = ((N_NODES*N_NODES)*NUM_CONNECTIONS)*sp_size; //Size of the entire 2D array
const size_t row_size = NUM_CONNECTIONS*sp_size; //Size of a SINGLE row in the array of SimplePaths
const size_t channels_size = ((2*N_EDGES)*MAX_CHANNELS)*sizeof(Channel);
const size_t filtered_compat_paths_size = (N_NODES*N_NODES*NUM_CONNECTIONS*NUM_CONNECTIONS*sizeof(int));
const size_t numPaths_size = N_NODES*N_NODES*sizeof(int);
const size_t numCompatPaths_size = N_NODES*N_NODES*NUM_CONNECTIONS*sizeof(int);
//Test Data (only used by the commented-out deterministic s/d selection below)
int v1[40] = {9, 5, 6, 1, 3, 5, 4, 9, 9, 9, 7, 8, 2, 10, 3, 5, 9, 3, 2, 3, 5, 2, 3, 3, 10, 9, 10, 2, 1, 1, 3, 2, 9, 5, 4, 6, 10, 5, 0, 1};
int v2[40] = {3, 8, 4, 3, 8, 3, 7, 1, 5, 6, 0, 6, 10, 5, 8, 2, 3, 6, 5, 4, 2, 3, 9, 7, 9, 5, 6, 5, 0, 2, 5, 5, 10, 3, 9, 3, 4, 1, 10, 2};
SimplePath **ps = new SimplePath*[N_NODES * N_NODES]; //Host pointer for paths storage
SimplePath *d_ps; //Device pointer for the array of SimplePaths
int *d_potPathCosts; //Device pointer for the array of Potential Path Costs
int *h_potPathCosts; //Host pointer for the array of potential path costs.
Connection *d_cons; //Device pointer to the array of connections.
Channel *d_channels; //Device pointer for the array of channels.
int *h_filteredPaths; //Host pointer for the flattened 3D array of paths which are filtered based on compatibility
int *d_filteredPaths; //Device pointer for filtered paths
int *numPosPaths; //Host pointer for array containing the number of paths for each src/dest pair
int *h_numCompatPaths; //Host pointer for the flattened 2D array
int *d_numCompatPaths;
// One row of NUM_CONNECTIONS path slots per ordered (src,dest) pair.
for(int i = 0; i < (N_NODES*N_NODES); ++i) {
ps[i] = new SimplePath[NUM_CONNECTIONS];
}
if(cudaSuccess != cudaMalloc((void **)&d_ps,ps_size)) {
cout << "Malloc Error\n";
}else {
cout << "allocated SimplePaths array on Device\n";
}
if(cudaSuccess != cudaMalloc((void **)&d_channels,channels_size)) {
cout << "Error Allocating channels on GPU\n";
}else {
cout << "Allocated Channels array on GPU\n";
}
// NOTE(review): the following allocations/copies are not error-checked.
cudaMalloc((void **)&d_cons,sizeof(Connection)*NUM_CONNECTIONS);
cudaMalloc((void **)&d_potPathCosts,potPathCosts_size);
cudaMalloc((void **)&d_filteredPaths,filtered_compat_paths_size);
cudaMalloc((void **)&d_numCompatPaths,numCompatPaths_size);
cudaMemcpy(d_channels,&channels,channels_size,cudaMemcpyHostToDevice);
h_potPathCosts = (int *)malloc(potPathCosts_size);
h_filteredPaths = (int *)malloc(filtered_compat_paths_size);
numPosPaths = (int *)malloc(numPaths_size);
h_numCompatPaths = (int *)malloc(numCompatPaths_size);
//Compute all simple paths
for(int src = 0; src < N_NODES; ++src) {
for(int dest = 0; dest < N_NODES; ++dest) {
if(src != dest) {
int index = (src*N_NODES)+dest;
numPosPaths[index] = computeAllSimplePathsN(ps,vertexList,edgeList,src,dest,N_NODES);
}else {
numPosPaths[(src*N_NODES)+dest] = 0; //Added so numPosPaths would have a real value for cases when src=dest.
}
}
}
//Filter the compatible paths (records which backup candidates are edge-disjoint from each primary)
for(int src = 0; src < N_NODES; ++src) {
for(int dest = 0; dest < N_NODES; ++dest) {
int index = (src * N_NODES) + dest;
if(src != dest){
prefilterCompatibleBackups(ps[index], h_filteredPaths, h_numCompatPaths, numPosPaths[index], src, dest);
}
}
}
//Copy Simple paths to the GPU, one (src,dest) row at a time
for(int i = 0; i < (N_NODES*N_NODES); ++i) {
cudaMemcpy(d_ps + (i*(NUM_CONNECTIONS)),ps[i],row_size,cudaMemcpyHostToDevice);
}
//Copy filtered paths to the GPU
cudaMemcpy(d_filteredPaths, h_filteredPaths, filtered_compat_paths_size, cudaMemcpyHostToDevice);
//Copy compatible paths array to GPU
cudaMemcpy(d_numCompatPaths, h_numCompatPaths, numCompatPaths_size, cudaMemcpyHostToDevice);
//Setup components for GPU benchmarking.
cudaEvent_t start, stop;
cpu_startTime = clock();
for(int c = 0; c < CONNECTIONS; ++c) {
//Attempt to allocate SOME connection onto the network
//int s = v1[connectionNum];
//int d = v2[connectionNum];
// Draw a random source/destination pair until they differ.
int s = 0;
int d = 0;
while(s == d) {
s = rand()%N_NODES;
d = rand()%N_NODES;
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
int index = (s*N_NODES) + d;
//BENCHMARKING
cudaEventRecord(start);
//--------Launch the Kernel---------//
// One block per primary candidate, one thread per backup candidate.
filteredCostsKernel<<<numPosPaths[index],numPosPaths[index]>>>(d_ps, d_potPathCosts, index, d_channels, d_numCompatPaths, d_filteredPaths);
//BENCHMARKING
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milli = 0;
cudaEventElapsedTime(&milli,start,stop);
gpu_totalTime += milli;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaError_t error_code = cudaGetLastError();
if(cudaSuccess != error_code) {
cout << "CUDA ERROR IN KERNEL: " << error_code << "\n";
cout << "ERROR: " << cudaGetErrorString(error_code) << "\n";
}
//---------Copy the Results back to the host ---//
cudaMemcpy(h_potPathCosts,d_potPathCosts,potPathCosts_size,cudaMemcpyDeviceToHost);
//-----------Select the cheapest combo using GPU Results-----------//
// Minimize primary hops + backup channel cost over all prefiltered pairs;
// negative cost entries mark infeasible combinations and are skipped.
int minCostGPU = 100000000;
int minPrimIndGPU = -1;
int minBackIndGPU = -1;
for(int p = 0; p < numPosPaths[index]; ++p) {
int primaryCostGPU = ps[index][p].hops;
for(int b = 0; b < h_numCompatPaths[(index*NUM_CONNECTIONS)+p]; ++b) {
int f = (p*NUM_CONNECTIONS)+b;
if(h_potPathCosts[(p*NUM_CONNECTIONS)+b] < 0) {
continue;
}
if((h_potPathCosts[(p*NUM_CONNECTIONS)+b] + primaryCostGPU) < minCostGPU) {
minCostGPU = (h_potPathCosts[(p*NUM_CONNECTIONS)+b] + primaryCostGPU);
minPrimIndGPU = p;
// filteredPaths holds global slot indices; subtract the row base
// to recover the local backup index within ps[index].
minBackIndGPU = h_filteredPaths[(index*NUM_CONNECTIONS*NUM_CONNECTIONS)+(p*NUM_CONNECTIONS)+b]-(index*NUM_CONNECTIONS);
}
}
}
cout << "Min cost on GPU is: " << minCostGPU << "\n";
cout << "PRIM: "<<minPrimIndGPU << "\n";
for(int i = 0; i <= ps[index][minPrimIndGPU].index; ++i) {
cout << (*ps[index][minPrimIndGPU].edges[i]).v1 << " -> " << (*ps[index][minPrimIndGPU].edges[i]).v2 << "\n";
}
cout << "BACK: " << minBackIndGPU << "\n";
for(int i = 0; i <= ps[index][minBackIndGPU].index; ++i) {
cout << (*ps[index][minBackIndGPU].edges[i]).v1 << " -> " << (*ps[index][minBackIndGPU].edges[i]).v2 << "\n";
}
//--------------Store the connection--------------//
cons[connectionNum].sourceNode = s;
cons[connectionNum].destNode = d;
cons[connectionNum].combinedCost = minCostGPU;
cons[connectionNum].validBackup = true;
cons[connectionNum].validPrimary = true;
cons[connectionNum].primaryPath.hops = ps[index][minPrimIndGPU].hops;
cons[connectionNum].primaryPath.index = ps[index][minPrimIndGPU].index;
cons[connectionNum].primaryPath.primary = true;
cons[connectionNum].backupPath.hops = ps[index][minBackIndGPU].hops;
cons[connectionNum].backupPath.index = ps[index][minBackIndGPU].index;
for(int p = 0; p <= ps[index][minPrimIndGPU].index; ++p) {
cons[connectionNum].primaryPath.edges[p] = ps[index][minPrimIndGPU].edges[p];
cons[connectionNum].primaryPath.freeEdges[p] = false;
cons[connectionNum].primaryPath.edgeNums[p] = ps[index][minPrimIndGPU].edgeNums[p];
}
for(int p = 0; p <= ps[index][minBackIndGPU].index; ++p) {
cons[connectionNum].backupPath.edges[p] = ps[index][minBackIndGPU].edges[p];
cons[connectionNum].backupPath.edgeNums[p] = ps[index][minBackIndGPU].edgeNums[p];
}
//Select the appropriate Channels for the selected connection
selectChannels(&cons[connectionNum],channels);
//Increase the network load
increaseLoad(&cons[connectionNum],channels,&d_cons[connectionNum]);
//NOTE: We can 100% only copy individual channels to the GPU. i.e. if only channels 3 and 41 were updated, we can copy ONLY those channels if we want to
cudaMemcpy(d_channels,&channels,channels_size,cudaMemcpyHostToDevice);
cudaMemcpy(&d_cons[connectionNum],&cons[connectionNum],sizeof(Connection),cudaMemcpyHostToDevice);
//--------------Print Network Load--------------//
for(int m = 0; m < 2*N_EDGES; ++m) {
cout << "LOAD: " << edgeList[m].v1 << " -> " << edgeList[m].v2 << ": " << edgeList[m].load << " | TP: " << edgeList[m].totalProtected << " | ";
if(edgeList[m].load > 0) {
for(int c = 0; c < edgeList[m].load; ++c) {
cout << "C" << c << ": " << channels[m][c].numBackups << " ";
if(channels[m][c].primary == true) {
cout << "P ";
}
}
}
cout << "\n";
}
connectionNum++;
}//ENDFOR
cudaDeviceSynchronize();
cpu_endTime = clock();
//--------------Clean up memory--------------//
for(int i = 0; i < (N_NODES*N_NODES); ++i) {
delete[] ps[i];
}
delete[] ps;
cudaFree(d_ps);
cudaFree(d_potPathCosts);
cudaFree(d_channels);
cudaFree(d_cons);
cudaFree(d_filteredPaths);
cudaFree(d_numCompatPaths);
free(h_potPathCosts);
free(h_filteredPaths);
free(numPosPaths);
free(h_numCompatPaths);
cpu_elapsedTime = ((double) (cpu_endTime - cpu_startTime)/CLOCKS_PER_SEC) * 1000;
cout << "Kernel Execution took: " << gpu_totalTime << " milliseconds\n";
cout << "Total time: " << cpu_elapsedTime << " milliseconds\n";
}
/*
 * For each primary candidate path between (src,dest), records into the
 * flattened filteredPaths array the global slot index of every candidate
 * that is edge-disjoint from it, and stores the per-primary count in
 * numCompatPaths. Layout matches what filteredCostsKernel expects.
 */
void prefilterCompatibleBackups(SimplePath *p, int *filteredPaths, int *numCompatPaths, int numPossiblePaths, int src, int dest) {
    const int pairBase = (src * N_NODES) + dest;  // flattened (src,dest) row index
    int conflicts = 0;                            // bookkeeping only; not reported
    for (int primary = 0; primary < numPossiblePaths; ++primary) {
        int compatible = 0;
        for (int backup = 0; backup < numPossiblePaths; ++backup) {
            // Only compare slots that actually hold paths.
            if (p[primary].hops <= 0 || p[backup].hops <= 0) {
                continue;
            }
            // Edge-disjointness: no edge number may appear in both paths.
            bool disjoint = true;
            for (int e1 = 0; disjoint && e1 <= p[primary].index; ++e1) {
                for (int e2 = 0; disjoint && e2 <= p[backup].index; ++e2) {
                    if (p[backup].edgeNums[e2] == p[primary].edgeNums[e1]) {
                        disjoint = false;
                        conflicts++;
                    }
                }
            }
            if (disjoint) {
                // Record this backup's global index in the primary's slot list.
                int slot = (NUM_CONNECTIONS * NUM_CONNECTIONS * pairBase) + (primary * NUM_CONNECTIONS) + compatible;
                filteredPaths[slot] = (pairBase * NUM_CONNECTIONS) + backup;
                compatible++;
            }
        }
        // All backups scanned for this primary: store the count.
        numCompatPaths[(pairBase * NUM_CONNECTIONS) + primary] = compatible;
    }
}
//-----------No longer using this method, since we have switched to GPU---------//
//-----------No longer using this method, since we have switched to GPU---------//
// Computes, for every backup candidate i whose cost slot is not pre-marked -1,
// the number of fresh channels backup path p[i] would consume given the
// channel state `cs`, writing the result into potPathCosts.
// A channel can be shared for free when every connection already protected on
// it has a primary path edge-disjoint from p[primaryInd]'s primary path.
// Cost 1000000 marks a path with some edge that has no usable channel.
// Fixed: the inner loops previously read the global `channels` array instead
// of the `cs` parameter, silently ignoring the caller-supplied state.
void computeCostForBackupsWithGPU(SimplePath *p, int *potPathCosts, int primaryInd, Channel cs[2*N_EDGES][MAX_CHANNELS]) {
    for (int i = 0; i < NUM_CONNECTIONS; ++i) {
        int pid = (primaryInd * NUM_CONNECTIONS) + i;
        if (potPathCosts[pid] == -1) {
            continue; // slot pre-marked incompatible with this primary
        }
        int cost = 0;
        for (int e = 0; e <= p[i].index; ++e) {
            bool free = false;
            int edgeNum = (*p[i].edges[e]).edgeNum;
            int firstOpenChannel = MAX_CHANNELS + 1; // sentinel: none seen yet
            for (int c = 0; !free && c < MAX_CHANNELS; ++c) {
                if (cs[edgeNum][c].primary == true) {
                    continue; // a primary path occupies this channel: unusable
                }
                // Empty channel: remember the first one as a fallback, but
                // keep scanning — a shareable channel further on is cheaper.
                if (cs[edgeNum][c].numBackups == 0) {
                    if (c < firstOpenChannel) {
                        firstOpenChannel = c;
                    }
                    continue;
                }
                bool disjoint = true;
                // Check every connection currently protected on the channel.
                for (int bup = 0; disjoint && bup < cs[edgeNum][c].numBackups; ++bup) {
                    // For each edge of the protected connection's primary path...
                    for (int e2 = 0; disjoint && e2 <= (*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.index; ++e2) {
                        // ...see if our primary path uses the same edge.
                        for (int e3 = 0; disjoint && e3 <= p[primaryInd].index; ++e3) {
                            if ((*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.edges[e2] == p[primaryInd].edges[e3]) {
                                // Non-disjoint primary on this channel: unusable.
                                disjoint = false;
                            }
                        }
                    }
                }
                if (disjoint) {
                    free = true; // backup can share this channel at no cost
                }
            }
            if (!free) {
                if (firstOpenChannel < MAX_CHANNELS) {
                    cost++; // must open a brand-new channel on this edge
                } else {
                    cost = 1000000; // edge saturated: path infeasible
                    break;
                }
            }
        }
        potPathCosts[pid] = cost;
    }
}
// CPU-only reference version of simulate_GPU: precomputes all simple paths
// for every (src,dest) pair, then for each of CONNECTIONS random requests
// finds the cheapest edge-disjoint primary/backup pair, stores it as a
// Connection, allocates channels, updates network load, and prints the load.
// Fixed: potPathInd is allocated with NUM_CONNECTIONS rows but was previously
// freed with only numPossiblePaths rows, leaking the remainder every
// iteration.
void simulate(int *vertexList, Edge *edgeList){
    clock_t cpu_startTime, cpu_endTime;
    double cpu_elapsedTime = 0;
    //cpu_startTime = clock();
    //Test Data (used only by the commented-out deterministic s/d selection)
    int v1[40] = {9, 5, 6, 1, 3, 5, 4, 9, 9, 9, 7, 8, 2, 10, 3, 5, 9, 3, 2, 3, 5, 2, 3, 3, 10, 9, 10, 2, 1, 1, 3, 2, 9, 5, 4, 6, 10, 5, 0, 1};
    int v2[40] = {3, 8, 4, 3, 8, 3, 7, 1, 5, 6, 0, 6, 10, 5, 8, 2, 3, 6, 5, 4, 2, 3, 9, 7, 9, 5, 6, 5, 0, 2, 5, 5, 10, 3, 9, 3, 4, 1, 10, 2};
    int connectionNum = 0;
    //We want to compute and store all possible paths between our source and destination.
    SimplePath **ps = new SimplePath*[N_NODES * N_NODES]; //Storage for paths
    int *npaths = new int[N_NODES*N_NODES]; //npaths[s*N_NODES+d] = stored path count
    for(int i = 0; i < (N_NODES*N_NODES); ++i) {
        ps[i] = new SimplePath[NUM_CONNECTIONS];
    }
    //Precompute every simple path for each ordered pair; diagonal entries of
    //npaths are left unset but are never read because s != d below.
    //We COULD parallelize this by giving a thread a source/dest combo to compute the paths of.
    for(int src = 0; src < N_NODES; ++src) {
        for(int dest = 0; dest < N_NODES; ++dest) {
            if(src != dest) {
                int index = (src*N_NODES)+dest;
                npaths[index] = computeAllSimplePathsN(ps,vertexList,edgeList,src,dest,N_NODES);
            }
        }
    }
    cpu_startTime = clock();
    for(int num = 0; num < CONNECTIONS; ++num) {
        //Attempt to allocate SOME connection onto the network
        //int s = v1[connectionNum];
        //int d = v2[connectionNum];
        // Draw a random src/dest pair until they differ (RNG call order kept
        // identical to the original so rand() streams match).
        int s = rand() % N_NODES;
        int d = rand() % N_NODES;
        while(s == d) {
            s = rand()%N_NODES;
            d = rand()%N_NODES;
        }
        int index = (s*N_NODES) + d;
        int numPossiblePaths = npaths[index];
        //potPathInd[i][j] = k where ps[index][k] is edge-disjoint from ps[index][i].
        int ** potPathInd = new int*[NUM_CONNECTIONS];
        for(int i = 0; i < NUM_CONNECTIONS; ++i) {
            potPathInd[i] = new int[NUM_CONNECTIONS];
        }
        //--------------Find all paths which are edge-disjoint from each primary--------------//
        int k = -1;
        for(int i = 0; i < numPossiblePaths; ++i) {
            k = determineCompatibleBackups(ps[index],potPathInd[i],numPossiblePaths,i);
        }
        //--------------Compute Cost for each backup path--------------//
        int ** pathCosts = new int*[numPossiblePaths];
        for(int i = 0; i < numPossiblePaths; ++i) {
            pathCosts[i] = new int[numPossiblePaths];
        }
        for(int i = 0; i < numPossiblePaths; ++i) {
            computeCostForBackups(ps[index],potPathInd[i],numPossiblePaths,i,pathCosts[i],channels);
        }
        //--------------Select cheapest connection--------------//
        // Minimize primary hops + backup cost; pathCosts rows end with -1.
        // NOTE(review): minPrimInd stays -1 if no pair is feasible, which
        // would index ps[index][-1] below — assumed not to happen.
        int minCost = 100000000;
        int minPrimInd = -1;
        int minBackInd = -1;
        for(int p = 0; p < numPossiblePaths; ++p) {
            int backInd = 0;
            int primaryCost = ps[index][p].hops;
            while(pathCosts[p][backInd] != -1) {
                if((pathCosts[p][backInd] + primaryCost) < minCost) {
                    minCost = (pathCosts[p][backInd] + primaryCost);
                    minPrimInd = p;
                    minBackInd = backInd;
                }
                backInd++;
            }
        }
        cout << "Min cost is: " << minCost << "\n";
        //--------------Store the connection--------------//
        cons[connectionNum].sourceNode = s;
        cons[connectionNum].destNode = d;
        cons[connectionNum].combinedCost = minCost;
        cons[connectionNum].validBackup = true;
        cons[connectionNum].validPrimary = true;
        cons[connectionNum].primaryPath.hops = ps[index][minPrimInd].hops;
        cons[connectionNum].primaryPath.index = ps[index][minPrimInd].index;
        cons[connectionNum].primaryPath.primary = true;
        cons[connectionNum].backupPath.hops = ps[index][potPathInd[minPrimInd][minBackInd]].hops;
        cons[connectionNum].backupPath.index = ps[index][potPathInd[minPrimInd][minBackInd]].index;
        for(int p = 0; p <= ps[index][minPrimInd].index; ++p) {
            cons[connectionNum].primaryPath.edges[p] = ps[index][minPrimInd].edges[p];
            cons[connectionNum].primaryPath.freeEdges[p] = false;
        }
        for(int p = 0; p <= ps[index][potPathInd[minPrimInd][minBackInd]].index; ++p) {
            cons[connectionNum].backupPath.edges[p] = ps[index][potPathInd[minPrimInd][minBackInd]].edges[p];
        }
        //Select Channels
        selectChannels(&cons[connectionNum],channels);
        //Increase the network load
        increaseLoad(&cons[connectionNum],channels);
        //--------------Print Network Load--------------//
        for(int m = 0; m < 2*N_EDGES; ++m) {
            cout << "LOAD: " << edgeList[m].v1 << " -> " << edgeList[m].v2 << ": " << edgeList[m].load << " | TP: " << edgeList[m].totalProtected << " | ";
            if(edgeList[m].load > 0) {
                for(int c = 0; c < edgeList[m].load; ++c) {
                    cout << "C" << c << ": " << channels[m][c].numBackups << " ";
                    if(channels[m][c].primary == true) {
                        cout << "P ";
                    }
                }
            }
            cout << "\n";
        }
        //--------------Clean up memory--------------//
        //FIX: free all NUM_CONNECTIONS rows that were allocated above
        //(previously only numPossiblePaths rows were freed — memory leak).
        for(int i = 0; i < NUM_CONNECTIONS; ++i) {
            delete[] potPathInd[i];
        }
        delete[] potPathInd;
        for(int i = 0; i < numPossiblePaths; ++i) {
            delete[] pathCosts[i];
        }
        delete[] pathCosts;
        connectionNum++;
    }//end loop
    cpu_endTime = clock();
    for(int i = 0; i < (N_NODES*N_NODES); ++i) {
        delete[] ps[i];
    }
    delete[] ps;
    delete[] npaths;
    cout << "ps and npaths deleted\n";
    //cpu_endTime = clock();
    cpu_elapsedTime = ((double)(cpu_endTime-cpu_startTime)/CLOCKS_PER_SEC)*1000;
    cout << "CPU Total Elapsed Time: " << cpu_elapsedTime << "\n";
}
/*
 * Applies a newly accepted connection to the network state (GPU variant).
 * Marks each primary-path channel as primary-occupied and bumps edge load,
 * then registers the backup path on its (possibly shared) channels. d_con is
 * the device-side copy of the connection, stored alongside the host pointer
 * so the kernel can follow the same protection lists.
 */
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS], Connection *d_con) {
    if (connection->primaryPath.index < 0) {
        cout << "Primary Path DNE?\n";
        return;
    }
    // Primary path: every edge's selected channel becomes a primary channel.
    for (int i = 0; i <= connection->primaryPath.index; ++i) {
        Edge *pe = connection->primaryPath.edges[i];
        int pc = connection->primaryPath.channelNum[i];
        channels[pe->edgeNum][pc].primary = true;
        channels[pe->edgeNum][pc].backupsOnChannel[0] = connection;
        channels[pe->edgeNum][pc].d_backupsOnChannel[0] = d_con;
        channels[pe->edgeNum][pc].numBackups += 1;
        pe->load += 1;
        pe->totalProtected += 1;
    }
    // Backup path: only non-free edges add real load; every edge records the
    // connection on its channel's protection list.
    for (int i = 0; i <= connection->backupPath.index; ++i) {
        Edge *e = connection->backupPath.edges[i];
        int cNum = connection->backupPath.channelNum[i];
        //if(channels[(*e).edgeNum][cNum].numBackups == 0 || (*(*connection).backupPath).freeEdges[i] == false) {
        if (connection->backupPath.freeEdges[i] == false) {
            e->load += 1;
        }
        int en = e->edgeNum;
        int numbs = channels[en][cNum].numBackups;
        channels[en][cNum].primary = false;
        channels[en][cNum].backupsOnChannel[numbs] = connection;
        channels[en][cNum].d_backupsOnChannel[numbs] = d_con;
        channels[en][cNum].numBackups += 1;
        e->totalProtected += 1;
    }
}
/*
 * CPU-only overload of increaseLoad: identical to the GPU variant except no
 * device-side connection pointer is recorded on the channels.
 */
void increaseLoad(Connection *connection, Channel channels[2*N_EDGES][MAX_CHANNELS]) {
    if (connection->primaryPath.index < 0) {
        cout << "Primary Path DNE?\n";
        return;
    }
    // Primary path: every edge's selected channel becomes a primary channel.
    for (int i = 0; i <= connection->primaryPath.index; ++i) {
        Edge *pe = connection->primaryPath.edges[i];
        int pc = connection->primaryPath.channelNum[i];
        channels[pe->edgeNum][pc].primary = true;
        channels[pe->edgeNum][pc].backupsOnChannel[0] = connection;
        channels[pe->edgeNum][pc].numBackups += 1;
        pe->load += 1;
        pe->totalProtected += 1;
    }
    // Backup path: only non-free edges add real load; every edge records the
    // connection on its channel's protection list.
    for (int i = 0; i <= connection->backupPath.index; ++i) {
        Edge *e = connection->backupPath.edges[i];
        int cNum = connection->backupPath.channelNum[i];
        //if(channels[(*e).edgeNum][cNum].numBackups == 0 || (*(*connection).backupPath).freeEdges[i] == false) {
        if (connection->backupPath.freeEdges[i] == false) {
            e->load += 1;
        }
        int en = e->edgeNum;
        int numbs = channels[en][cNum].numBackups;
        channels[en][cNum].primary = false;
        channels[en][cNum].backupsOnChannel[numbs] = connection;
        channels[en][cNum].numBackups += 1;
        e->totalProtected += 1;
    }
}
//TODO: This method contains a lot of redundant code that is also in computeCostForBackups. Consider combining.
//I wanted to modularize the code as much as possible this time around, which is why there's so much redundancy in this method.
//TODO: This method contains a lot of redundant code that is also in computeCostForBackups. Consider combining.
//I wanted to modularize the code as much as possible this time around, which is why there's so much redundancy in this method.
// Assigns a concrete channel number to every edge of the connection's primary
// and backup paths, based on the current channel state `chan`.
// Primary edges take the first channel with no backups on it. Backup edges
// prefer a shareable channel (one whose protected connections all have
// primary paths edge-disjoint from ours) and mark the edge "free"; otherwise
// they fall back to the first completely empty channel found.
void selectChannels(Connection *c, Channel chan[2*N_EDGES][MAX_CHANNELS]) {
/*
cout << "prim\n";
for(int i = 0; i <= (*(*c).primaryPath).index; ++i) {
cout << (*(*(*c).primaryPath).edges[i]).v1 << " -> " << (*(*(*c).primaryPath).edges[i]).v2 << "\n";
}
cout << "back\n";
for(int i = 0; i <= (*(*c).backupPath).index; ++i) {
cout << (*(*(*c).backupPath).edges[i]).v1 << " -> " << (*(*(*c).backupPath).edges[i]).v2 << "\n";
}
*/
int edgeNum = -1;
//Select Primary path channels;
// Primary: first channel with zero backups wins.
for(int p = 0; p <= (*c).primaryPath.index; ++p){
edgeNum = (*(*c).primaryPath.edges[p]).edgeNum;
bool allSet = false;
for(int ch = 0; !allSet && ch < MAX_CHANNELS; ++ch) {
if(chan[edgeNum][ch].numBackups == 0) {
allSet = true;
(*c).primaryPath.channelNum[p] = ch;
}
}
}
// Backup: for each edge, look for a channel we can share for free.
for(int e = 0; e <= (*c).backupPath.index; ++e) {
bool free = false;
edgeNum = (*(*c).backupPath.edges[e]).edgeNum;
int firstOpenChannel = MAX_CHANNELS+1;
for(int ch = 0; !free && ch < MAX_CHANNELS; ++ch) {
if(chan[edgeNum][ch].primary == true) {
continue;
}
//At this point, we know that there are no primary paths on this channel
//Thus we must check and see if it is "free".
//we COULD use this channel, but there may be a "free" one further down.
if(chan[edgeNum][ch].numBackups == 0) {
if(ch < firstOpenChannel) {
firstOpenChannel = ch;
}
//continue;
// NOTE(review): this `break` stops the scan at the first empty
// channel, unlike computeCostForBackups which `continue`s and keeps
// searching for a shareable channel — confirm this divergence is
// intentional.
break;
}
bool disjoint = true;
//Check every connection currently on protected on the channel
for(int bup = 0; disjoint && bup < chan[edgeNum][ch].numBackups; ++bup) {
//At this point, we know that there is at least one path protected on this channel.
//Technically, we should also know that it's not a primary path.
//for each edge of the protected connection's primary path
for(int e2 = 0; disjoint && e2 <= (*chan[edgeNum][ch].backupsOnChannel[bup]).primaryPath.index; ++e2) {
//see if its the same edge as used by our primary path.
for(int e3 = 0; disjoint && e3 <= (*c).primaryPath.index; ++e3 ) {
if((*chan[edgeNum][ch].backupsOnChannel[bup]).primaryPath.edges[e2] == (*c).primaryPath.edges[e3]) {
//There is a non-disjoint primary path on this channel, so it is unusable.
//goto CHANNEL_LOOP_END;
disjoint = false;
}
}
}
}
if(disjoint) {
//This channel is free
free = true;
(*c).backupPath.channelNum[e] = ch;
(*c).backupPath.freeEdges[e] = true;
}
}
// No shareable channel found: claim the first empty one instead.
if((*c).backupPath.freeEdges[e] == false) {
(*c).backupPath.channelNum[e] = firstOpenChannel;
}
}
//cout << "all set?\n";
}
// Computes the channel cost of each compatible backup candidate for the
// primary path p[primaryInd]. potPathInd holds indices of edge-disjoint
// candidates terminated by -1; pathCosts[i] receives the number of fresh
// channels backup potPathInd[i] would require (1000000 if some edge has no
// usable channel at all). A channel can be shared for free when every
// connection already protected on it has a primary path edge-disjoint from
// ours.
// Fixed: the inner loops previously consulted the global `channels` array
// instead of the `cs` parameter, ignoring the caller-supplied channel state.
void computeCostForBackups(SimplePath *p, int *potPathInd, int numPossiblePaths, int primaryInd, int *pathCosts, Channel cs[2*N_EDGES][MAX_CHANNELS]) {
    for (int i = 0; i < numPossiblePaths; ++i) {
        if (potPathInd[i] == -1) {
            pathCosts[i] = -1; // propagate the end-of-list sentinel and stop
            break;
        }
        int pid = potPathInd[i];
        int cost = 0;
        for (int e = 0; e <= p[pid].index; ++e) {
            bool free = false;
            int edgeNum = (*p[pid].edges[e]).edgeNum;
            int firstOpenChannel = MAX_CHANNELS + 1; // sentinel: none seen yet
            for (int c = 0; !free && c < MAX_CHANNELS; ++c) {
                if (cs[edgeNum][c].primary == true) {
                    continue; // a primary path occupies this channel: unusable
                }
                // Empty channel: remember the first one as a fallback, but
                // keep scanning — a shareable channel further on is cheaper.
                if (cs[edgeNum][c].numBackups == 0) {
                    if (c < firstOpenChannel) {
                        firstOpenChannel = c;
                    }
                    continue;
                }
                bool disjoint = true;
                // Check every connection currently protected on the channel.
                for (int bup = 0; disjoint && bup < cs[edgeNum][c].numBackups; ++bup) {
                    // For each edge of the protected connection's primary path...
                    for (int e2 = 0; disjoint && e2 <= (*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.index; ++e2) {
                        // ...see if our primary path uses the same edge.
                        for (int e3 = 0; disjoint && e3 <= p[primaryInd].index; ++e3) {
                            if ((*cs[edgeNum][c].backupsOnChannel[bup]).primaryPath.edges[e2] == p[primaryInd].edges[e3]) {
                                // Non-disjoint primary on this channel: unusable.
                                disjoint = false;
                            }
                        }
                    }
                }
                if (disjoint) {
                    free = true; // backup can share this channel at no cost
                }
            }
            if (!free) {
                if (firstOpenChannel < MAX_CHANNELS) {
                    cost++; // must open a brand-new channel on this edge
                } else {
                    cost = 1000000; // edge saturated: path infeasible
                    break;
                }
            }
        }
        pathCosts[i] = cost;
    }
}
//TODO: There's some sketchiness going on with numPossiblePaths vs NUM_CONNECTIONS.
/*
 * Collects into potPathInd the index of every stored path that is
 * edge-disjoint from the primary candidate p[pInd]. Scans all
 * NUM_CONNECTIONS slots, skipping empty/invalid ones (hops <= 0 or
 * index < 0). The list is terminated with -1; returns the number of
 * disjoint paths found.
 */
int determineCompatibleBackups(SimplePath *p, int *potPathInd, int numPossiblePaths, int pInd) {
    int found = 0;
    int conflicts = 0; // bookkeeping only; not reported
    //cout << "SRC: " << p[pInd].sourceNode << " DEST: " << p[pInd].destNode << "\n";
    for (int cand = 0; cand < NUM_CONNECTIONS; ++cand) {
        // Skip slots that never held a real path (candidate or primary).
        if (p[cand].hops <= 0 || p[cand].index < 0 || p[pInd].hops <= 0 || p[pInd].index < 0) {
            conflicts++;
            continue;
        }
        // Compare every edge of the primary against every edge of the candidate.
        bool disjoint = true;
        for (int e1 = 0; disjoint && e1 <= p[pInd].index; ++e1) {
            for (int e2 = 0; disjoint && e2 <= p[cand].index; ++e2) {
                if (p[cand].edgeNums[e2] == p[pInd].edgeNums[e1]) {
                    disjoint = false;
                    conflicts++;
                }
            }
        }
        if (disjoint) {
            potPathInd[found] = cand;
            found++;
        }
    }
    // Sentinel marking the end of the compatibility list.
    potPathInd[found] = -1;
    //cout << "disjoint: " << found << " out of " << numPossiblePaths <<"\n";
    return found;
}
// Iterative DFS that enumerates every simple path from sourceNode to destNode
// with fewer than `hops` edges, storing each completed path into
// ps[(sourceNode*N_NODES)+destNode][0..]. Edges already at MAX_CHANNELS load
// are skipped. The path currently being built lives in the slot at
// `currentPath`; when destNode is reached, the prefix is copied forward to
// the next slot so the search can continue extending it. Returns the number
// of complete paths stored; slots from currentPath onward have hops set to 0
// to mark them invalid.
int computeAllSimplePathsN(SimplePath **ps, int *vertexList, Edge *edgeList, int sourceNode, int destNode, int hops) {
int index = (sourceNode * N_NODES) + destNode;
//initialize arrays
int visited[N_NODES]; //visited[i] is 1 if node i has been visited on this path, 0 otherwise.
int currentPath = 0;
//edgeListIndex[i] contains the index into edgeList[] (aka the compact adj list) for node i.
int edgeListIndex[N_NODES];
ps[index][currentPath].index = 0;
//Initialize our search components
for(int i = 0; i < N_NODES; ++i) {
visited[i] = 0;
edgeListIndex[i] = vertexList[i];
}
stack <int> st;
int currentNode;
int neighbor;
int currentHop = 1;
st.push(sourceNode);
visited[sourceNode] = 1;
while(st.size() > 0) {
//use loopCond to get to the beginning of the while loop from inside the for loop.
bool loopCond = true;
currentNode = st.top();
//for each neighbor of currentNode
for(; loopCond == true && edgeListIndex[currentNode] < vertexList[currentNode+1]; ++edgeListIndex[currentNode]) {
neighbor = edgeList[edgeListIndex[currentNode]].v2;
//If we're too far away from our source node, backtrack.
if(currentHop >= hops) {
break;
}
// Fully loaded edges cannot carry another path; skip them.
if(edgeList[edgeListIndex[currentNode]].load == MAX_CHANNELS) {
continue;
}
//If our neighbor is the desired node, AND we're at the correct path length, save this path!
if(neighbor == destNode && currentHop < hops) {
ps[index][currentPath].edges[ps[index][currentPath].index] = &edgeList[edgeListIndex[currentNode]];
ps[index][currentPath].edgeNums[ps[index][currentPath].index] = edgeList[edgeListIndex[currentNode]].edgeNum;
ps[index][currentPath].sourceNode = sourceNode;
ps[index][currentPath].destNode = destNode;
ps[index][currentPath].hops = currentHop;
//Copy the whole path up until the dest node to the next path in the array.
//Note that we don't copy the COST from the current primary path, as the cost is computed
//independently for each primary path.
ps[index][currentPath+1].sourceNode = sourceNode;
ps[index][currentPath+1].destNode = destNode;
ps[index][currentPath+1].hops = currentHop;
ps[index][currentPath+1].index = ps[index][currentPath].index-1;
for(int i = 0; i < ps[index][currentPath].index; ++i) {
ps[index][currentPath+1].edges[i] = ps[index][currentPath].edges[i];
ps[index][currentPath+1].edgeNums[i] = ps[index][currentPath].edgeNums[i];
}
// The copied prefix becomes the new working path; resume the search
// from the next neighbor of currentNode.
currentPath += 1;
ps[index][currentPath].index += 1;
++edgeListIndex[currentNode];
//
loopCond = false;
break;
}
// Unvisited neighbor: extend the working path and descend.
if(!visited[neighbor]) {
ps[index][currentPath].edges[ps[index][currentPath].index] = &edgeList[edgeListIndex[currentNode]];
ps[index][currentPath].edgeNums[ps[index][currentPath].index] = edgeList[edgeListIndex[currentNode]].edgeNum;
ps[index][currentPath].index += 1;
st.push(neighbor);
visited[neighbor] = 1;
currentHop++;
//continue the while loop, but increment the ELI first.
++edgeListIndex[currentNode];
loopCond = false;
break;
}
}
// Exhausted all neighbors (or hop limit hit): backtrack one level.
if(loopCond) {
currentHop--;
//Once we've visited all of this node's neighbors, we reset it so that a
//different path involving this node can be explored.
visited[currentNode] = 0;
ps[index][currentPath].index -= 1;
edgeListIndex[currentNode] = vertexList[currentNode];
st.pop();
}
}
//Last path is invalid
ps[index][currentPath].hops = 0;
// Mark every unused trailing slot invalid so later scans can skip them.
for(int j = currentPath; j < NUM_CONNECTIONS; ++j) {
ps[index][j].hops = 0;
}
return currentPath;
}
// Builds the CSR-style graph representation from the global base_edges table:
// vertexList[i] holds the offset of node i's neighbors in compactEdgeList,
// and vertexList[N_NODES] caps the structure at 2*N_EDGES entries. Then
// produces reorderedEdgeList, a copy of each node's neighbor list sorted by
// ascending neighbor degree (selection-sort style), so low-degree neighbors
// are explored first by the path search.
// NOTE(review): reorderedEdgeList entries only get v1/v2/load set — edgeNum
// and totalProtected are left untouched; confirm callers only rely on those
// three fields.
void readGraphReorderEdgeList(int vertexList[],Edge compactEdgeList[2*N_EDGES],Edge reorderedEdgeList[2*N_NODES]) {
//cout << "Beginning read\n";
//TODO: We def don't need this extra array... please revise.
// Dense adjacency matrix built from the undirected base edge list.
int edgeList[N_NODES][N_NODES];
for(int i = 0; i < N_NODES; ++i) {
for(int j = 0; j < N_NODES; ++j) {
edgeList[i][j] = 0;
}
}
for(int i = 0; i < N_EDGES; ++i) {
edgeList[base_edges[i][0]][base_edges[i][1]] = 1;
edgeList[base_edges[i][1]][base_edges[i][0]] = 1;
}
int vDegree[N_NODES];
int counter = 0;
// Flatten the adjacency matrix into the compact (CSR) edge list and record
// each node's degree along the way.
for(int i = 0; i < N_NODES; ++i) {
vertexList[i] = counter;
for(int j = 0; j < N_NODES; ++j) {
if(edgeList[i][j] != 0) {
compactEdgeList[counter].v1 = i;
compactEdgeList[counter].v2 = j;
compactEdgeList[counter].load = 0;
compactEdgeList[counter].totalProtected = 0;
compactEdgeList[counter].edgeNum = counter;
//for(int x = 0; x < MAX_CHANNELS; ++x) {
// compactEdgeList[counter].channels[x].numBackups = 0;
//}
counter++;
}
}
vDegree[i] = counter - vertexList[i];
//cout << i << ": " << vDegree[i] << "\n";
}
vertexList[N_NODES] = 2*N_EDGES;
//THis successfully reorders the edgelist based on the degree of the neighbor.
//TODO: make this sorting algorithm faster... like WAY faster.
// Selection sort per node: repeatedly pick the not-yet-copied neighbor with
// the smallest degree (ties resolved toward the later entry, since <= is
// used) and append it to the reordered list.
for(int i = 0; i < N_NODES; ++i) {
int startInd = vertexList[i];
int endInd = vertexList[i+1];
//[startInd,endInd)
int reorderedInd = startInd;
while(reorderedInd < endInd) {
int min = startInd;
int minVal = 66666; //min degree of the neighbor
//Find the "smallest" neighbor of this node.
for(int j = startInd; j < endInd; ++j) {
bool isReordered = false;
//Check to see if this node is already in our reordered list.
for(int k = startInd; k < reorderedInd; ++k) {
if(reorderedEdgeList[k].v2 == compactEdgeList[j].v2) {
isReordered = true;
break;
}
}
//if its not in our reordered list and it qualifies as the minimum neighbor.
if(isReordered == false && vDegree[compactEdgeList[j].v2] <= minVal) {
min = j;
minVal = vDegree[compactEdgeList[j].v2];
}
}
reorderedEdgeList[reorderedInd].v1 = compactEdgeList[min].v1;
reorderedEdgeList[reorderedInd].v2 = compactEdgeList[min].v2;
reorderedEdgeList[reorderedInd].load = 0;
reorderedInd++;
}
}
}
|
d5b16b9940bbd1bba9a7577b7f4be3774c8b4a9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from dgemm_tesla_N_N.cu normal d -> s, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
 * saxpy computes c += alpha*b, where b and c are 16-element vectors.
 * Device-only helper; the fixed trip count lets the compiler fully unroll
 * into the same 16 fused multiply-adds as the hand-unrolled original.
 */
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
    #pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_N_N_64_16_16_16_4(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimension M, N that doesnt fit into blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
hipLaunchKernelGGL(( sgemm_kernel_N_N_64_16_16_16_4), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
| d5b16b9940bbd1bba9a7577b7f4be3774c8b4a9f.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from dgemm_tesla_N_N.cu normal d -> s, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void saxpy(
float alpha,
const float* __restrict__ b,
float* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This code should run for any matrix size.
This kernel outperforms cuda-2.2 when m, n, k >= 512
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_N_N_64_16_16_16_4(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
/*
Taking care of invalid memory access in dimension M
*/
if ( ibx+idt >= m )
A += ibx+0;
else
A += ibx + idt;
C += ibx + idt + __mul24(iby, ldc);
B += tx+__mul24(iby, ldb);
/*
These variables guide the threads to avoid invalid memory accesses
in dimension N.
Simply it's the stopping criterion.
or you can say that access index wraps around to a valid memory location.
*/
int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb;
if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else
if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; }
if ( s1 == 0 )
B += __mul24(ty, ldb);
else
s1=0;
const float *Bend = B + k - k % 16;
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if ( k > 15 ) {
do {
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
A += 4 * lda;
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
__syncthreads();
} while (B < Bend);
}
/*
Common sub expression elimination.
*/
ibx = ibx + idt - m;
/*
remembering k dimension
*/
ldb = m = k;
/*
k changed to support the generic case and reuse valuable registers
*/
k = k % 16;
m -= k;
/*
Here we are taking care of k % dim_k portions
*/
if ( k != 0 ) {
/*
Avoid Invalid Memory access in dimension K
If some thread enters this if ( ) block first access to B
should be valid as K isn't divisible by blk_K
Note that dimension N has been taken care of by s1, s2, s3, s4
But depending upon K and thread index tx, some memory access
may be still invalid, so take care of them now by setting
s1, s2, s3, s4 = 0
B might have been advanced in the previous loop, take care
of that, this is about right bottom corner.
*/
if ( m + tx >= ldb ) {
s1 = s2 = s3 = s4 = 0;
B -= tx;
}
Bb[tx][ty+0 ] = B[s1];
Bb[tx][ty+4 ] = B[s2];
Bb[tx][ty+8 ] = B[s3];
Bb[tx][ty+12] = B[s4];
__syncthreads();
for(int i=0; i < k; i++) {
saxpy( A[0], &Bb[i+0][0], Cb );
A += lda;
}
}
/*
Now taking care of dimension M, N that doesnt fit into blocks
*/
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_N_N_64_16_16_16_4(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
sgemm_kernel_N_N_64_16_16_16_4<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
1c2fbf9c7032e4f17e5d226de02d805f037c9f71.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/IndexUtils.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
namespace faiss {
namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t)256 * 1024 * 1024;
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
const faiss::IndexBinaryFlat* index,
GpuIndexBinaryFlatConfig config)
: IndexBinary(index->d),
resources_(provider->getResources()),
binaryFlatConfig_(config) {
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
copyFrom(index);
}
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
int dims,
GpuIndexBinaryFlatConfig config)
: IndexBinary(dims),
resources_(provider->getResources()),
binaryFlatConfig_(std::move(config)) {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
// Construct index
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
}
GpuIndexBinaryFlat::~GpuIndexBinaryFlat() {}
int GpuIndexBinaryFlat::getDevice() const {
return binaryFlatConfig_.device;
}
std::shared_ptr<GpuResources> GpuIndexBinaryFlat::getResources() {
return resources_;
}
void GpuIndexBinaryFlat::copyFrom(const faiss::IndexBinaryFlat* index) {
DeviceScope scope(binaryFlatConfig_.device);
this->d = index->d;
// GPU code has 32 bit indices
FAISS_THROW_IF_NOT_FMT(
index->ntotal <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices; "
"attempting to copy CPU index with %zu parameters",
(size_t)std::numeric_limits<int>::max(),
(size_t)index->ntotal);
this->ntotal = index->ntotal;
// destroy old first before allocating new
data_.reset();
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
// The index could be empty
if (index->ntotal > 0) {
data_->add(
index->xb.data(),
index->ntotal,
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::copyTo(faiss::IndexBinaryFlat* index) const {
DeviceScope scope(binaryFlatConfig_.device);
index->d = this->d;
index->ntotal = this->ntotal;
FAISS_ASSERT(data_);
FAISS_ASSERT(data_->getSize() == this->ntotal);
index->xb.resize(this->ntotal * (this->d / 8));
if (this->ntotal > 0) {
fromDevice(
data_->getVectorsRef(),
index->xb.data(),
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::add(faiss::IndexBinary::idx_t n, const uint8_t* x) {
DeviceScope scope(binaryFlatConfig_.device);
validateNumVectors(n);
// To avoid multiple re-allocations, ensure we have enough storage
// available
data_->reserve(n, resources_->getDefaultStream(binaryFlatConfig_.device));
// Due to GPU indexing in int32, we can't store more than this
// number of vectors on a GPU
FAISS_THROW_IF_NOT_FMT(
this->ntotal + n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t)std::numeric_limits<int>::max());
data_->add(
(const unsigned char*)x,
n,
resources_->getDefaultStream(binaryFlatConfig_.device));
this->ntotal += n;
}
void GpuIndexBinaryFlat::reset() {
DeviceScope scope(binaryFlatConfig_.device);
// Free the underlying memory
data_->reset();
this->ntotal = 0;
}
void GpuIndexBinaryFlat::search(
faiss::IndexBinary::idx_t n,
const uint8_t* x,
faiss::IndexBinary::idx_t k,
int32_t* distances,
faiss::IndexBinary::idx_t* labels,
const SearchParameters* params) const {
DeviceScope scope(binaryFlatConfig_.device);
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
if (n == 0) {
return;
}
FAISS_THROW_IF_NOT_MSG(!params, "params not implemented");
validateNumVectors(n);
validateKSelect(k);
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<int32_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
distances,
stream,
{(int)n, (int)k});
// FlatIndex only supports an interface returning int indices
DeviceTensor<int, 2, true> outIntIndices(
resources_.get(),
makeTempAlloc(AllocType::Other, stream),
{(int)n, (int)k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t)n * (this->d / 8) * sizeof(uint8_t);
if (dataSize >= kMinPageSize) {
searchFromCpuPaged_(
n, x, k, outDistances.data(), outIntIndices.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k, outDistances.data(), outIntIndices.data());
}
// Convert and copy int indices out
auto outIndices = toDeviceTemporary<Index::idx_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
labels,
stream,
{(int)n, (int)k});
// Convert int to idx_t
convertTensor<int, Index::idx_t, 2>(stream, outIntIndices, outIndices);
// Copy back if necessary
fromDevice<int32_t, 2>(outDistances, distances, stream);
fromDevice<Index::idx_t, 2>(outIndices, labels, stream);
}
void GpuIndexBinaryFlat::searchNonPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<uint8_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
const_cast<uint8_t*>(x),
stream,
{n, (int)(this->d / 8)});
data_->query(vecs, k, outDistances, outIndices);
}
void GpuIndexBinaryFlat::searchFromCpuPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto vectorSize = sizeof(uint8_t) * (this->d / 8);
// Just page without overlapping copy with compute (as GpuIndexFlat does)
int batchSize = utils::nextHighestPowerOf2(
(int)((size_t)kMinPageSize / vectorSize));
for (int cur = 0; cur < n; cur += batchSize) {
int num = ::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(
num,
x + (size_t)cur * (this->d / 8),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
}
void GpuIndexBinaryFlat::reconstruct(
faiss::IndexBinary::idx_t key,
uint8_t* out) const {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_MSG(key < this->ntotal, "index out of bounds");
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
auto& vecs = data_->getVectorsRef();
auto vec = vecs[key];
fromDevice(vec.data(), out, vecs.getSize(1), stream);
}
} // namespace gpu
} // namespace faiss
| 1c2fbf9c7032e4f17e5d226de02d805f037c9f71.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/IndexUtils.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
namespace faiss {
namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t)256 * 1024 * 1024;
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
const faiss::IndexBinaryFlat* index,
GpuIndexBinaryFlatConfig config)
: IndexBinary(index->d),
resources_(provider->getResources()),
binaryFlatConfig_(config) {
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
copyFrom(index);
}
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
int dims,
GpuIndexBinaryFlatConfig config)
: IndexBinary(dims),
resources_(provider->getResources()),
binaryFlatConfig_(std::move(config)) {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
// Construct index
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
}
GpuIndexBinaryFlat::~GpuIndexBinaryFlat() {}
int GpuIndexBinaryFlat::getDevice() const {
return binaryFlatConfig_.device;
}
std::shared_ptr<GpuResources> GpuIndexBinaryFlat::getResources() {
return resources_;
}
void GpuIndexBinaryFlat::copyFrom(const faiss::IndexBinaryFlat* index) {
DeviceScope scope(binaryFlatConfig_.device);
this->d = index->d;
// GPU code has 32 bit indices
FAISS_THROW_IF_NOT_FMT(
index->ntotal <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices; "
"attempting to copy CPU index with %zu parameters",
(size_t)std::numeric_limits<int>::max(),
(size_t)index->ntotal);
this->ntotal = index->ntotal;
// destroy old first before allocating new
data_.reset();
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
// The index could be empty
if (index->ntotal > 0) {
data_->add(
index->xb.data(),
index->ntotal,
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::copyTo(faiss::IndexBinaryFlat* index) const {
DeviceScope scope(binaryFlatConfig_.device);
index->d = this->d;
index->ntotal = this->ntotal;
FAISS_ASSERT(data_);
FAISS_ASSERT(data_->getSize() == this->ntotal);
index->xb.resize(this->ntotal * (this->d / 8));
if (this->ntotal > 0) {
fromDevice(
data_->getVectorsRef(),
index->xb.data(),
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
// Append `n` binary vectors (each d / 8 packed bytes) from `x` to the
// index. Copies run on the index's default stream. Throws if the
// resulting total would exceed the int32 indexing limit used by the GPU
// kernels.
void GpuIndexBinaryFlat::add(faiss::IndexBinary::idx_t n, const uint8_t* x) {
DeviceScope scope(binaryFlatConfig_.device);
validateNumVectors(n);
// Due to GPU indexing in int32, we can't store more than this
// number of vectors on a GPU. Validate BEFORE reserving, so we do not
// allocate device memory for an addition that is about to be rejected
// (the original reserved first and then threw).
FAISS_THROW_IF_NOT_FMT(
this->ntotal + n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t)std::numeric_limits<int>::max());
// To avoid multiple re-allocations, ensure we have enough storage
// available
data_->reserve(n, resources_->getDefaultStream(binaryFlatConfig_.device));
data_->add(
(const unsigned char*)x,
n,
resources_->getDefaultStream(binaryFlatConfig_.device));
this->ntotal += n;
}
// Remove all vectors from the index and release the device memory that
// backed them; the index remains usable for subsequent add() calls.
void GpuIndexBinaryFlat::reset() {
DeviceScope scope(binaryFlatConfig_.device);
this->ntotal = 0;
// Free the underlying device storage
data_->reset();
}
// k-nearest-neighbor search over the stored binary vectors (Hamming
// distance). Queries may reside on host or device; large host-resident
// query sets are processed in pages (searchFromCpuPaged_) so they need
// not fit on the GPU all at once. Output distances/labels are assumed
// to fit on the GPU.
void GpuIndexBinaryFlat::search(
faiss::IndexBinary::idx_t n,
const uint8_t* x,
faiss::IndexBinary::idx_t k,
int32_t* distances,
faiss::IndexBinary::idx_t* labels,
const SearchParameters* params) const {
DeviceScope scope(binaryFlatConfig_.device);
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
if (n == 0) {
// nothing to search for
return;
}
FAISS_THROW_IF_NOT_MSG(!params, "params not implemented");
validateNumVectors(n);
validateKSelect(k);
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<int32_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
distances,
stream,
{(int)n, (int)k});
// FlatIndex only supports an interface returning int indices
DeviceTensor<int, 2, true> outIntIndices(
resources_.get(),
makeTempAlloc(AllocType::Other, stream),
{(int)n, (int)k});
bool usePaged = false;
// getDeviceForAddress(x) == -1 means x is a host pointer
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t)n * (this->d / 8) * sizeof(uint8_t);
if (dataSize >= kMinPageSize) {
searchFromCpuPaged_(
n, x, k, outDistances.data(), outIntIndices.data());
usePaged = true;
}
}
if (!usePaged) {
// small or device-resident input: single pass
searchNonPaged_(n, x, k, outDistances.data(), outIntIndices.data());
}
// Convert and copy int indices out
auto outIndices = toDeviceTemporary<Index::idx_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
labels,
stream,
{(int)n, (int)k});
// Convert int to idx_t
convertTensor<int, Index::idx_t, 2>(stream, outIntIndices, outIndices);
// Copy back if necessary
fromDevice<int32_t, 2>(outDistances, distances, stream);
fromDevice<Index::idx_t, 2>(outIndices, labels, stream);
}
// Single-shot search helper: move the n query vectors to the configured
// device (if not already there) and run the Hamming k-selection query.
// outDistancesData / outIndicesData must be device-resident n x k arrays.
void GpuIndexBinaryFlat::searchNonPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
// Wrap the raw device output pointers as 2-D tensors
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<uint8_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
const_cast<uint8_t*>(x),
stream,
{n, (int)(this->d / 8)});
data_->query(vecs, k, outDistances, outIndices);
}
// Paged search for host-resident query sets too large to copy to the GPU
// in one shot: slice the queries into batches of roughly kMinPageSize
// bytes and run searchNonPaged_ on each slice, writing into the
// corresponding slice of the output tensors.
void GpuIndexBinaryFlat::searchFromCpuPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
// bytes per packed binary vector
auto vectorSize = sizeof(uint8_t) * (this->d / 8);
// Just page without overlapping copy with compute (as GpuIndexFlat does)
// NOTE(review): if vectorSize > kMinPageSize the quotient is 0 and
// batchSize may be degenerate — verify utils::nextHighestPowerOf2(0)
// or confirm that case cannot occur here.
int batchSize = utils::nextHighestPowerOf2(
(int)((size_t)kMinPageSize / vectorSize));
for (int cur = 0; cur < n; cur += batchSize) {
// last batch may be short
int num = std::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(
num,
x + (size_t)cur * (this->d / 8),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
}
// Copy the vector stored at position `key` into `out` (d / 8 packed
// bytes). `out` may be a host or device pointer; the copy runs on the
// index's default stream.
void GpuIndexBinaryFlat::reconstruct(
faiss::IndexBinary::idx_t key,
uint8_t* out) const {
DeviceScope scope(binaryFlatConfig_.device);
// Check both ends of the range: the original only tested
// key < ntotal, so a negative key would index before the start of
// the device storage.
FAISS_THROW_IF_NOT_MSG(
key >= 0 && key < this->ntotal, "index out of bounds");
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
auto& vecs = data_->getVectorsRef();
auto vec = vecs[key];
// copy one row (getSize(1) elements) device -> out
fromDevice(vec.data(), out, vecs.getSize(1), stream);
}
} // namespace gpu
} // namespace faiss
|
0dfe0decc72360fc20754fb1cd928a750d0b7a47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetrf_batched_smallsq_shfl.cu, normal z -> d, Mon Jun 25 18:24:15 2018
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. and shuffle for communication.
// It also uses lazy swap.
extern __shared__ double ddata[];
// LU factorization with partial pivoting and lazy row swapping of one
// small N-by-N matrix per thread column (ty). Thread tx keeps matrix row
// tx in registers (rA); the pivot row is broadcast with warp shuffles,
// and shared memory holds per-row magnitudes (sx) plus pivot bookkeeping
// (sipiv, scurrent_piv_tx). NSHFL is N rounded up to a power of two so
// the shuffle width is valid.
// NOTE(review): sx/sipiv/scurrent_piv_tx are written and then read with
// no explicit barrier; this relies on warp-synchronous execution of the
// NSHFL (<= 32) threads — verify on architectures with independent
// thread scheduling.
template<int N, int NSHFL>
__global__ void
dgetrf_batched_smallsq_shfl_kernel( double** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// one matrix (batch entry) per ty column of the block
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
double* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
// rA: this thread's matrix row; y: broadcast copy of the current pivot row
double rA[N] = {MAGMA_D_ZERO};
double y[N] = {MAGMA_D_ZERO};
double reg = MAGMA_D_ZERO;
// rowid tracks which logical row this thread holds under lazy swapping
int max_id, current_piv_tx, rowid = tx, linfo = 0;
double rx_abs_max = MAGMA_D_ZERO;
// shared memory pointers (layout must match the host-side shmem sizing)
double* sx = (double*)(ddata);
int* sipiv = (int*)(sx + blockDim.y * NSHFL);
sx += ty * NSHFL;
sipiv += ty * (NSHFL+1);
volatile int* scurrent_piv_tx = (volatile int*)(sipiv + NSHFL);
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
// publish |Re| + |Im| of column i for the logical row this thread holds
sx[ rowid ] = fabs(MAGMA_D_REAL( rA[i] )) + fabs(MAGMA_D_IMAG( rA[i] ));
rx_abs_max = sx[i];
max_id = i;
// pivot search: largest magnitude at/below the diagonal of column i
#pragma unroll
for(int j = i; j < N; j++){
if( sx[j] > rx_abs_max){
max_id = j;
rx_abs_max = sx[j];
}
}
// NOTE(review): linfo is overwritten each iteration, so a singular
// pivot in an earlier column survives only if the last checked
// column is also singular — confirm this matches the intended
// info semantics.
linfo = ( rx_abs_max == MAGMA_D_ZERO ) ? i+1 : 0;
// lazy swap: exchange logical row ids instead of moving row data
if(rowid == max_id){
sipiv[i] = max_id;
rowid = i;
(*scurrent_piv_tx) = tx;
}
else if(rowid == i){
rowid = max_id;
}
current_piv_tx = (*scurrent_piv_tx);
// broadcast the pivot row (columns i..N-1) to all threads via shuffle
#pragma unroll
for(int j = i; j < N; j++){
y[j] = magmablas_dshfl( rA[j], current_piv_tx, NSHFL);
}
reg = MAGMA_D_DIV(MAGMA_D_ONE, y[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * y[j];
}
}
}
// write
// thread 0 publishes the singularity flag for this matrix
if( tx == 0 ){
(*info) = (magma_int_t)linfo;
}
if(tx < N) {
// pivot indices are reported 1-based, LAPACK style
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1);
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
dgetrf_batched_smallsq_shfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
// Host-side dispatcher for the small-square batched LU factorization
// (HIP build). Validates 0 <= n <= 32, sizes the dynamic shared-memory
// workspace to match the kernel's layout (sx doubles + sipiv ints + one
// current-pivot int, per ty column), and launches the kernel
// instantiated for the compile-time size m, with the shuffle width
// padded up to the next power of two.
extern "C" magma_int_t
magma_dgetrf_batched_smallsq_shfl(
magma_int_t n,
double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
// only square sizes up to 32 (one warp) are supported
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// quick return for empty matrices
if( m == 0) return 0;
// ntcol: number of matrices factored side by side per thread block
const magma_int_t ntcol = magma_get_dgetrf_batched_ntcol(m, n);
// shared memory per block: sipiv ints + sx doubles + one current-pivot
// int, per ty column; must agree with the pointer math in the kernel
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(double);
shmem += ntcol * 1 * sizeof(int);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
// dispatch on the compile-time matrix size
switch(m){
case 1:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 12:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32:hipLaunchKernelGGL(( dgetrf_batched_smallsq_shfl_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
| 0dfe0decc72360fc20754fb1cd928a750d0b7a47.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetrf_batched_smallsq_shfl.cu, normal z -> d, Mon Jun 25 18:24:15 2018
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. and shuffle for communication.
// It also uses lazy swap.
extern __shared__ double ddata[];
// LU factorization with partial pivoting and lazy row swapping of one
// small N-by-N matrix per thread column (ty). Thread tx keeps matrix row
// tx in registers (rA); the pivot row is broadcast with warp shuffles,
// and shared memory holds per-row magnitudes (sx) plus pivot bookkeeping
// (sipiv, scurrent_piv_tx). NSHFL is N rounded up to a power of two so
// the shuffle width is valid.
// NOTE(review): sx/sipiv/scurrent_piv_tx are written and then read with
// no explicit barrier; this relies on warp-synchronous execution of the
// NSHFL (<= 32) threads — verify on architectures with independent
// thread scheduling.
template<int N, int NSHFL>
__global__ void
dgetrf_batched_smallsq_shfl_kernel( double** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// one matrix (batch entry) per ty column of the block
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
double* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
// rA: this thread's matrix row; y: broadcast copy of the current pivot row
double rA[N] = {MAGMA_D_ZERO};
double y[N] = {MAGMA_D_ZERO};
double reg = MAGMA_D_ZERO;
// rowid tracks which logical row this thread holds under lazy swapping
int max_id, current_piv_tx, rowid = tx, linfo = 0;
double rx_abs_max = MAGMA_D_ZERO;
// shared memory pointers (layout must match the host-side shmem sizing)
double* sx = (double*)(ddata);
int* sipiv = (int*)(sx + blockDim.y * NSHFL);
sx += ty * NSHFL;
sipiv += ty * (NSHFL+1);
volatile int* scurrent_piv_tx = (volatile int*)(sipiv + NSHFL);
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
// publish |Re| + |Im| of column i for the logical row this thread holds
sx[ rowid ] = fabs(MAGMA_D_REAL( rA[i] )) + fabs(MAGMA_D_IMAG( rA[i] ));
rx_abs_max = sx[i];
max_id = i;
// pivot search: largest magnitude at/below the diagonal of column i
#pragma unroll
for(int j = i; j < N; j++){
if( sx[j] > rx_abs_max){
max_id = j;
rx_abs_max = sx[j];
}
}
// NOTE(review): linfo is overwritten each iteration, so a singular
// pivot in an earlier column survives only if the last checked
// column is also singular — confirm this matches the intended
// info semantics.
linfo = ( rx_abs_max == MAGMA_D_ZERO ) ? i+1 : 0;
// lazy swap: exchange logical row ids instead of moving row data
if(rowid == max_id){
sipiv[i] = max_id;
rowid = i;
(*scurrent_piv_tx) = tx;
}
else if(rowid == i){
rowid = max_id;
}
current_piv_tx = (*scurrent_piv_tx);
// broadcast the pivot row (columns i..N-1) to all threads via shuffle
#pragma unroll
for(int j = i; j < N; j++){
y[j] = magmablas_dshfl( rA[j], current_piv_tx, NSHFL);
}
reg = MAGMA_D_DIV(MAGMA_D_ONE, y[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * y[j];
}
}
}
// write
// thread 0 publishes the singularity flag for this matrix
if( tx == 0 ){
(*info) = (magma_int_t)linfo;
}
if(tx < N) {
// pivot indices are reported 1-based, LAPACK style
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1);
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
dgetrf_batched_smallsq_shfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
// Host-side dispatcher for the small-square batched LU factorization
// (CUDA build). Validates 0 <= n <= 32, sizes the dynamic shared-memory
// workspace to match the kernel's layout (sx doubles + sipiv ints + one
// current-pivot int, per ty column), and launches the kernel
// instantiated for the compile-time size m, with the shuffle width
// padded up to the next power of two.
extern "C" magma_int_t
magma_dgetrf_batched_smallsq_shfl(
magma_int_t n,
double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
// only square sizes up to 32 (one warp) are supported
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// quick return for empty matrices
if( m == 0) return 0;
// ntcol: number of matrices factored side by side per thread block
const magma_int_t ntcol = magma_get_dgetrf_batched_ntcol(m, n);
// shared memory per block: sipiv ints + sx doubles + one current-pivot
// int, per ty column; must agree with the pointer math in the kernel
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(double);
shmem += ntcol * 1 * sizeof(int);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
// dispatch on the compile-time matrix size
switch(m){
case 1: dgetrf_batched_smallsq_shfl_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2: dgetrf_batched_smallsq_shfl_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3: dgetrf_batched_smallsq_shfl_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4: dgetrf_batched_smallsq_shfl_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5: dgetrf_batched_smallsq_shfl_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6: dgetrf_batched_smallsq_shfl_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7: dgetrf_batched_smallsq_shfl_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8: dgetrf_batched_smallsq_shfl_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9: dgetrf_batched_smallsq_shfl_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10: dgetrf_batched_smallsq_shfl_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11: dgetrf_batched_smallsq_shfl_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 12: dgetrf_batched_smallsq_shfl_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13: dgetrf_batched_smallsq_shfl_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14: dgetrf_batched_smallsq_shfl_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15: dgetrf_batched_smallsq_shfl_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16: dgetrf_batched_smallsq_shfl_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17: dgetrf_batched_smallsq_shfl_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18: dgetrf_batched_smallsq_shfl_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19: dgetrf_batched_smallsq_shfl_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20: dgetrf_batched_smallsq_shfl_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21: dgetrf_batched_smallsq_shfl_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22: dgetrf_batched_smallsq_shfl_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23: dgetrf_batched_smallsq_shfl_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24: dgetrf_batched_smallsq_shfl_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25: dgetrf_batched_smallsq_shfl_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26: dgetrf_batched_smallsq_shfl_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27: dgetrf_batched_smallsq_shfl_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28: dgetrf_batched_smallsq_shfl_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29: dgetrf_batched_smallsq_shfl_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30: dgetrf_batched_smallsq_shfl_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31: dgetrf_batched_smallsq_shfl_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32: dgetrf_batched_smallsq_shfl_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
|
75c6a4c4c3e2f9205af0e0685242f3d1d3d08a07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
#define BLOCK_SIZE 16
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
/*
 * Print an error message to stderr and terminate the program.
 * The original only printed and returned, so callers (e.g. readinput
 * after a short input file) kept running with invalid state and crashed
 * later; exiting here makes the failure immediate and clean.
 */
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
exit(1);
}
// Print (at most) the top-left 10 x 50 corner of the result grid to
// stdout, one row per line. The file-output path is retained but
// commented out, so the `file` parameter is currently unused.
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
// FILE *fp;
// char str[STR_SIZE];
// if( (fp = fopen(file, "w" )) == 0 )
// printf( "The file was not opened\n" );
for (i=0; i < grid_rows && i<10; i++) {
for (j=0; j < grid_cols && j<50; j++)
{
printf("%g ", vect[i*grid_cols+j]);
// fputs(str,fp);
index++;
}
printf("\n");
}
// fclose(fp);
}
/*
 * Read grid_rows x grid_cols float values (one per input line) from
 * `file` into vect, row-major. Exits on open failure: the original
 * printed a message and then fell through to fgets(NULL), which crashes.
 */
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 ){
printf( "The file was not opened: %s\n", file );
exit(1);
}
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// Hotspot stencil kernel: runs `iteration` steps of the transient thermal
// update on a BLOCK_SIZE x BLOCK_SIZE tile held in shared memory
// (ghost-zone / temporal-blocking scheme). Each step shrinks the valid
// interior by one ring; only threads in the innermost small-block region
// write results back to global memory.
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
// shadows the file-scope host variable of the same name
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
// precompute reciprocals so the stencil uses multiplies, not divides
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
// NOTE(review): the row stride here is grid_rows, not grid_cols; this is
// only correct when the grid is square (grid_rows == grid_cols) -- confirm.
int index = grid_rows*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
// NOTE(review): shared entries for out-of-range threads stay
// uninitialized; neighbor indices below are clamped to the valid range,
// which appears to keep them unread -- verify.
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
// neighbor offsets, clamped to the valid tile region
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
// barrier is outside the divergent branch: all threads reach it
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
   compute N time steps

   Repeatedly launches calculate_temp, ping-ponging between the two
   MatrixTemp device buffers; each launch advances the simulation by up
   to num_iterations steps.  Returns the index (0 or 1) of the buffer
   holding the final temperatures.
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
    int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(blockCols, blockRows);

    // Per-cell physical coefficients of the HotSpot thermal model.
    float grid_height = chip_height / row;
    float grid_width = chip_width / col;
    float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
    float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
    float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
    float Rz = t_chip / (K_SI * grid_height * grid_width);

    float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
    float step = PRECISION / max_slope;
    float time_elapsed = 0.001; // forwarded to the kernel

    int src = 1, dst = 0;
    // FIX: integer loop counter -- the original 'float t' loses exactness
    // once total_iterations exceeds 2^24 and needlessly mixes float/int
    // arithmetic in the MIN() below.
    for (int t = 0; t < total_iterations; t += num_iterations) {
        // Swap source and destination temperature buffers.
        int temp = src;
        src = dst;
        dst = temp;
        hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
            col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
    }
    return dst;
}
// Program entry point: delegates all work to run() and reports success.
int main(int argc, char** argv)
{
run(argc,argv);
return EXIT_SUCCESS;
}
// NOTE(review): MAX_STRING_LENGTH appears unused in the visible code -- confirm before removing.
#define MAX_STRING_LENGTH 1024
// Default input temperature/power grids and output file name; each can be
// overridden from the command line (argv[4..6]) in run().
char dtfile[] = "data/temp.dat";
char dpfile[] = "data/power.dat";
char dofile[] = "output_pyramid.dat";
// Top-level driver: parses the command line, loads the initial temperature
// and power grids, runs the transient simulation on the GPU and prints the
// resulting temperatures.
//
// Usage: grid_rows/cols pyramid_height total_iterations [tfile [pfile [ofile]]]
void run(int argc, char** argv)
{
    int size;
    int grid_rows,grid_cols;
    float *FilesavingTemp,*FilesavingPower,*MatrixOut;
    char* tfile = (char*)&dtfile;
    char* pfile = (char*)&dpfile;
    char* ofile = (char*)&dofile;

    int total_iterations = 60;
    int pyramid_height = 1; // number of iterations per kernel launch

    if (argc >= 2)
    {
        grid_rows = atoi(argv[1]);
        grid_cols = atoi(argv[1]); // grids are always square
    }
    if (argc >= 3)
        pyramid_height = atoi(argv[2]);
    if (argc >= 4)
        total_iterations = atoi(argv[3]);
    if (argc >= 5) {
        tfile = argv[4];
    }
    if (argc >= 6) {
        pfile = argv[5];
    }
    if (argc >= 7) {
        ofile = argv[6];
    }
    if (argc < 4 || argc > 7) {
        printf("Wrong Usage: grid_rows/cols pyramid_height total_iterations\n");
        exit(0);
    }
    size=grid_rows*grid_cols;

    /* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
    int borderCols = (pyramid_height)*EXPAND_RATE/2;
    int borderRows = (pyramid_height)*EXPAND_RATE/2;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
    int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);

    // Host buffers for the two input grids and the result.
    FilesavingTemp = (float *) malloc(size*sizeof(float));
    FilesavingPower = (float *) malloc(size*sizeof(float));
    MatrixOut = (float *) calloc (size, sizeof(float));
    if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
        fatal("unable to allocate memory");

    printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
        pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);

    readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
    readinput(FilesavingPower, grid_rows, grid_cols, pfile);

    // Device buffers: two ping-pong temperature grids plus the power map.
    float *MatrixTemp[2], *MatrixPower;
    hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
    hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
    hipMalloc((void**)&MatrixPower, sizeof(float)*size);
#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif
    hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
    hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);

    // ret selects which ping-pong buffer holds the final temperatures.
    int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
        total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);

    hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif
    writeoutput(MatrixOut,grid_rows, grid_cols, ofile);

    hipFree(MatrixPower);
    hipFree(MatrixTemp[0]);
    hipFree(MatrixTemp[1]);
    free(MatrixOut);
    // FIX: release the host input buffers as well (previously leaked).
    free(FilesavingTemp);
    free(FilesavingPower);
    printf("\nTEST PASSED\n");
}
| 75c6a4c4c3e2f9205af0e0685242f3d1d3d08a07.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
// Edge length of a square thread block (BLOCK_SIZE x BLOCK_SIZE threads).
#define BLOCK_SIZE 16
// Buffer size for one line of the input files.
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
// NOTE(review): the kernel declares its own local amb_temp = 80.0; keep the
// two values in sync.
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
// NOTE(review): these macros reference startCycle/stopCycle, which are not
// defined in this file -- presumably provided elsewhere; confirm before use.
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
// Report an error message on stderr.
// NOTE: despite its name this helper does NOT terminate the process;
// existing callers (readinput, run) continue after it returns, so that
// behavior is deliberately preserved.
// FIX: take const char* -- passing string literals to a char* parameter is
// ill-formed in modern C++; const char* remains compatible with all callers.
void
fatal(const char *s)
{
    fprintf(stderr, "error: %s\n", s);
}
// Dump (at most) the first 10 rows x 50 columns of the grid to stdout.
// The 'file' argument is currently unused: the original file-writing code
// was commented out and output goes to the console instead.
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
    int printed = 0; // count of values emitted (kept for parity with original)
    for (int r = 0; r < grid_rows && r < 10; r++) {
        for (int c = 0; c < grid_cols && c < 50; c++) {
            printf("%g ", vect[r*grid_cols+c]);
            printed++;
        }
        printf("\n");
    }
}
// Read grid_rows*grid_cols float samples (one per line) from 'file' into vect.
// NOTE(review): if fopen fails, only a message is printed and execution
// proceeds to fgets() on the null stream -- preserved as-is because fatal()
// does not exit either.
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
    FILE *fp = fopen(file, "r");
    if (fp == 0)
        printf( "The file was not opened: %s\n", file );
    char str[STR_SIZE];
    for (int i = 0; i < grid_rows; i++) {
        for (int j = 0; j < grid_cols; j++) {
            fgets(str, STR_SIZE, fp);
            if (feof(fp))
                fatal("not enough lines in file");
            float val;
            if (sscanf(str, "%f", &val) != 1)
                fatal("invalid file format");
            vect[i*grid_cols+j] = val;
        }
    }
    fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// One pyramid step of the HotSpot transient thermal simulation.
// Each BLOCK_SIZE x BLOCK_SIZE block stages a tile of the temperature and
// power grids in shared memory, then applies the 5-point stencil
// 'iteration' times; each sweep shrinks the trustworthy interior by one
// cell per side, so only the central small block is written to temp_dst.
// border_cols/border_rows give the halo width; Cap/Rx/Ry/Rz/step are
// physical coefficients computed on the host; time_elapsed is accepted
// but not used by the computation.
__global__ void calculate_temp(int iteration,  //number of iteration
                               float *power,    //power input
                               float *temp_src, //temperature input/output
                               float *temp_dst, //temperature input/output
                               int grid_cols,   //Col of grid
                               int grid_rows,   //Row of grid
                               int border_cols, // border offset
                               int border_rows, // border offset
                               float Cap,       //Capacitance
                               float Rx,
                               float Ry,
                               float Rz,
                               float step,
                               float time_elapsed){
    __shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // per-iteration stencil result

    float amb_temp = 80.0f; // ambient temperature (matches host-side constant)
    float step_div_Cap;
    float Rx_1,Ry_1,Rz_1;

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    step_div_Cap = step/Cap;
    Rx_1 = 1/Rx;
    Ry_1 = 1/Ry;
    Rz_1 = 1/Rz;

    // Each block finally computes the result for a small non-overlapping
    // block after 'iteration' sweeps; together these cover the whole grid.
    int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
    int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE

    // Boundary of the full (halo-extended) tile this block works on.
    int blkY = small_block_rows*by-border_rows;
    int blkX = small_block_cols*bx-border_cols;
    int blkYmax = blkY+BLOCK_SIZE-1;
    int blkXmax = blkX+BLOCK_SIZE-1;

    // Global coordinates of this thread's cell.
    int yidx = blkY+ty;
    int xidx = blkX+tx;

    // Stage the tile; out-of-grid cells are left unloaded (never read,
    // because N/S/W/E below are clamped to the valid range).
    // NOTE(review): the row stride is grid_rows, not grid_cols -- the host
    // always uses square grids, where the two are equal.
    int loadYidx = yidx, loadXidx = xidx;
    int index = grid_rows*loadYidx+loadXidx;
    if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
        temp_on_cuda[ty][tx] = temp_src[index];
        power_on_cuda[ty][tx] = power[index];
    }
    __syncthreads();

    // Portion of the tile that falls inside the grid; computation outside
    // it is ruled out.
    int validYmin = (blkY < 0) ? -blkY : 0;
    int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
    int validXmin = (blkX < 0) ? -blkX : 0;
    int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;

    // Neighbour indices, clamped so boundary cells replicate themselves.
    int N = ty-1;
    int S = ty+1;
    int W = tx-1;
    int E = tx+1;
    N = (N < validYmin) ? validYmin : N;
    S = (S > validYmax) ? validYmax : S;
    W = (W < validXmin) ? validXmin : W;
    E = (E > validXmax) ? validXmax : E;

    // FIX: initialize 'computed' -- it was previously read uninitialized
    // after the loop whenever iteration <= 0.
    bool computed = false;
    for (int i = 0; i < iteration; i++){
        computed = false;
        if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) &&
            IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) &&
            IN_RANGE(tx, validXmin, validXmax) &&
            IN_RANGE(ty, validYmin, validYmax) ) {
            computed = true;
            // FIX: 2.0f instead of 2.0 keeps the stencil in single
            // precision (the double literal forced float->double->float
            // round trips on every term).
            temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
                (temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0f*temp_on_cuda[ty][tx]) * Ry_1 +
                (temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0f*temp_on_cuda[ty][tx]) * Rx_1 +
                (amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
        }
        __syncthreads();
        if(i==iteration-1)
            break;
        if(computed) // publish this sweep's result for the next one
            temp_on_cuda[ty][tx] = temp_t[ty][tx];
        __syncthreads();
    }

    // After the last sweep only the threads of the small block have
    // 'computed' set; they write the final value back to global memory.
    if (computed){
        temp_dst[index] = temp_t[ty][tx];
    }
}
/*
   compute N time steps

   Repeatedly launches calculate_temp, ping-ponging between the two
   MatrixTemp device buffers; each launch advances the simulation by up
   to num_iterations steps.  Returns the index (0 or 1) of the buffer
   holding the final temperatures.
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
    int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(blockCols, blockRows);

    // Per-cell physical coefficients of the HotSpot thermal model.
    float grid_height = chip_height / row;
    float grid_width = chip_width / col;
    float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
    float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
    float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
    float Rz = t_chip / (K_SI * grid_height * grid_width);

    float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
    float step = PRECISION / max_slope;
    float time_elapsed = 0.001; // forwarded to the kernel

    int src = 1, dst = 0;
    // FIX: integer loop counter -- the original 'float t' loses exactness
    // once total_iterations exceeds 2^24 and needlessly mixes float/int
    // arithmetic in the MIN() below.
    for (int t = 0; t < total_iterations; t += num_iterations) {
        // Swap source and destination temperature buffers.
        int temp = src;
        src = dst;
        dst = temp;
        calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
            col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
    }
    return dst;
}
// Program entry point: delegates all work to run() and reports success.
int main(int argc, char** argv)
{
run(argc,argv);
return EXIT_SUCCESS;
}
// NOTE(review): MAX_STRING_LENGTH appears unused in the visible code -- confirm before removing.
#define MAX_STRING_LENGTH 1024
// Default input temperature/power grids and output file name; each can be
// overridden from the command line (argv[4..6]) in run().
char dtfile[] = "data/temp.dat";
char dpfile[] = "data/power.dat";
char dofile[] = "output_pyramid.dat";
// Top-level driver: parses the command line, loads the initial temperature
// and power grids, runs the transient simulation on the GPU and prints the
// resulting temperatures.
//
// Usage: grid_rows/cols pyramid_height total_iterations [tfile [pfile [ofile]]]
void run(int argc, char** argv)
{
    int size;
    int grid_rows,grid_cols;
    float *FilesavingTemp,*FilesavingPower,*MatrixOut;
    char* tfile = (char*)&dtfile;
    char* pfile = (char*)&dpfile;
    char* ofile = (char*)&dofile;

    int total_iterations = 60;
    int pyramid_height = 1; // number of iterations per kernel launch

    if (argc >= 2)
    {
        grid_rows = atoi(argv[1]);
        grid_cols = atoi(argv[1]); // grids are always square
    }
    if (argc >= 3)
        pyramid_height = atoi(argv[2]);
    if (argc >= 4)
        total_iterations = atoi(argv[3]);
    if (argc >= 5) {
        tfile = argv[4];
    }
    if (argc >= 6) {
        pfile = argv[5];
    }
    if (argc >= 7) {
        ofile = argv[6];
    }
    if (argc < 4 || argc > 7) {
        printf("Wrong Usage: grid_rows/cols pyramid_height total_iterations\n");
        exit(0);
    }
    size=grid_rows*grid_cols;

    /* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
    int borderCols = (pyramid_height)*EXPAND_RATE/2;
    int borderRows = (pyramid_height)*EXPAND_RATE/2;
    int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
    int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
    int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);

    // Host buffers for the two input grids and the result.
    FilesavingTemp = (float *) malloc(size*sizeof(float));
    FilesavingPower = (float *) malloc(size*sizeof(float));
    MatrixOut = (float *) calloc (size, sizeof(float));
    if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
        fatal("unable to allocate memory");

    printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
        pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);

    readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
    readinput(FilesavingPower, grid_rows, grid_cols, pfile);

    // Device buffers: two ping-pong temperature grids plus the power map.
    float *MatrixTemp[2], *MatrixPower;
    cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
    cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
    cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif
    cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
    cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);

    // ret selects which ping-pong buffer holds the final temperatures.
    int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
        total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);

    cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif
    writeoutput(MatrixOut,grid_rows, grid_cols, ofile);

    cudaFree(MatrixPower);
    cudaFree(MatrixTemp[0]);
    cudaFree(MatrixTemp[1]);
    free(MatrixOut);
    // FIX: release the host input buffers as well (previously leaked).
    free(FilesavingTemp);
    free(FilesavingPower);
    printf("\nTEST PASSED\n");
}
|
280a71a1420bb3ade64ec43921451a44fab99cf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Build a symmetric 0/1 adjacency matrix from a k-nearest-neighbour table.
// dOut: n x n row-major matrix; nn: n x n table where row 'col' holds that
// column's neighbour indices (only the first k entries of each row are used).
// One thread per column sets both (col, nb) and (nb, col) to 1.0f.
// NOTE(review): assumes dOut was zero-initialized by the caller -- confirm.
// Distinct threads may store to the same cell concurrently, but every such
// store writes the identical value 1.0f.
__global__ void ComputeAdjacencyMatrix(float* dOut, int* nn, int n, int k)
{
// Get the column that the current thread is responsible for
auto col = blockIdx.x * blockDim.x + threadIdx.x;
// If id is within bounds
if(col < n)
{
auto nnCol = &nn[col * n];
for(auto i = 0; i < k; ++i)
{
// Mark the edge in both directions (symmetric adjacency).
dOut[col * n + nnCol[i]] = dOut[col + n * nnCol[i]] = 1.0f;
}
}
} | 280a71a1420bb3ade64ec43921451a44fab99cf5.cu | #include "includes.h"
// Build a symmetric 0/1 adjacency matrix from a k-nearest-neighbour table.
// dOut: n x n row-major matrix; nn: n x n table where row 'col' holds that
// column's neighbour indices (only the first k entries of each row are used).
// One thread per column sets both (col, nb) and (nb, col) to 1.0f.
// NOTE(review): assumes dOut was zero-initialized by the caller -- confirm.
// Distinct threads may store to the same cell concurrently, but every such
// store writes the identical value 1.0f.
__global__ void ComputeAdjacencyMatrix(float* dOut, int* nn, int n, int k)
{
// Get the column that the current thread is responsible for
auto col = blockIdx.x * blockDim.x + threadIdx.x;
// If id is within bounds
if(col < n)
{
auto nnCol = &nn[col * n];
for(auto i = 0; i < k; ++i)
{
// Mark the edge in both directions (symmetric adjacency).
dOut[col * n + nnCol[i]] = dOut[col + n * nnCol[i]] = 1.0f;
}
}
} |
b70889af713e06332e43b11e8f9f0924bfe7e4e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dcn_v2_im2col_cuda.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Threads per block used by every kernel launch in this file.
const int CUDA_NUM_THREADS = 1024;
// Number of blocks needed to cover N work items (ceiling division);
// yields 0 blocks when N == 0.
inline int GET_BLOCKS(const int N)
{
    const int threadsPerBlock = CUDA_NUM_THREADS;
    return (N + threadsPerBlock - 1) / threadsPerBlock;
}
// Bilinearly interpolate bottom_data (row stride data_width, logical size
// height x width) at the fractional coordinate (h, w).  Corner samples that
// fall outside the image contribute 0 (zero padding).
__device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
    // FIX: floorf() keeps the computation in single precision; floor()
    // promoted the float argument to double for the same integral result.
    int h_low = floorf(h);
    int w_low = floorf(w);
    int h_high = h_low + 1;
    int w_high = w_low + 1;

    // Fractional parts and their complements -> the four corner weights.
    float lh = h - h_low;
    float lw = w - w_low;
    float hh = 1 - lh, hw = 1 - lw;

    // Fetch the four corners, substituting 0 for out-of-range ones.
    float v1 = 0;
    if (h_low >= 0 && w_low >= 0)
        v1 = bottom_data[h_low * data_width + w_low];
    float v2 = 0;
    if (h_low >= 0 && w_high <= width - 1)
        v2 = bottom_data[h_low * data_width + w_high];
    float v3 = 0;
    if (h_high <= height - 1 && w_low >= 0)
        v3 = bottom_data[h_high * data_width + w_low];
    float v4 = 0;
    if (h_high <= height - 1 && w_high <= width - 1)
        v4 = bottom_data[h_high * data_width + w_high];

    float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
    float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    return val;
}
// im2col for modulated deformable convolution (DCNv2).
// Each input channel expands into kernel_h*kernel_w output rows: for every
// output position, each kernel tap samples the input at its regular grid
// location displaced by a learned (offset_h, offset_w), bilinearly
// interpolated via dmcn_im2col_bilinear, and scaled by a learned mask value.
// One thread per element of (channels x batch_size x height_col x width_col);
// the grid-stride CUDA_KERNEL_LOOP covers any launch size.
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const float *data_im, const float *data_offset, const float *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
float *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
// Decompose the flat index into (w_col, h_col, b_col, c_im).
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
// First output row produced by this input channel.
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
// Top-left corner of the receptive field in input coordinates.
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
// Output pointer for tap (0,0); advanced by batch_size*height_col*width_col per tap below.
float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
// The offset tensor stores 2 values (dy, dx) per tap; the mask stores 1.
const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = static_cast<float>(0);
// Deformed (fractional) sampling location for this tap.
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
// '> -1' (rather than '>= 0') admits fractional coordinates in (-1, 0),
// whose in-range corner still contributes via the bilinear zero padding.
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
// Modulated sample; move to the next tap's output row.
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
// Host-side driver for modulated_deformable_im2col_gpu_kernel: launches one
// thread per element of (channels x batch_size x height_col x width_col) on
// 'stream'.  A launch error is printed but not propagated to the caller.
// NOTE(review): assumes channels is divisible by deformable_group -- confirm.
// NOTE(review): parameter name 'kenerl_w' (sic) is the kernel width.
void modulated_deformable_im2col_cuda(hipStream_t stream,
const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel)
, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
0, stream,
num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
| b70889af713e06332e43b11e8f9f0924bfe7e4e6.cu | #include "dcn_v2_im2col_cuda.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Threads per block used by every kernel launch in this file.
const int CUDA_NUM_THREADS = 1024;
// Number of blocks needed to cover N work items (ceiling division);
// yields 0 blocks when N == 0.
inline int GET_BLOCKS(const int N)
{
    const int threadsPerBlock = CUDA_NUM_THREADS;
    return (N + threadsPerBlock - 1) / threadsPerBlock;
}
// Bilinearly interpolate bottom_data (row stride data_width, logical size
// height x width) at the fractional coordinate (h, w).  Corner samples that
// fall outside the image contribute 0 (zero padding).
__device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
    // FIX: floorf() keeps the computation in single precision; floor()
    // promoted the float argument to double for the same integral result.
    int h_low = floorf(h);
    int w_low = floorf(w);
    int h_high = h_low + 1;
    int w_high = w_low + 1;

    // Fractional parts and their complements -> the four corner weights.
    float lh = h - h_low;
    float lw = w - w_low;
    float hh = 1 - lh, hw = 1 - lw;

    // Fetch the four corners, substituting 0 for out-of-range ones.
    float v1 = 0;
    if (h_low >= 0 && w_low >= 0)
        v1 = bottom_data[h_low * data_width + w_low];
    float v2 = 0;
    if (h_low >= 0 && w_high <= width - 1)
        v2 = bottom_data[h_low * data_width + w_high];
    float v3 = 0;
    if (h_high <= height - 1 && w_low >= 0)
        v3 = bottom_data[h_high * data_width + w_low];
    float v4 = 0;
    if (h_high <= height - 1 && w_high <= width - 1)
        v4 = bottom_data[h_high * data_width + w_high];

    float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
    float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    return val;
}
// im2col for modulated deformable convolution (DCNv2).
// Each input channel expands into kernel_h*kernel_w output rows: for every
// output position, each kernel tap samples the input at its regular grid
// location displaced by a learned (offset_h, offset_w), bilinearly
// interpolated via dmcn_im2col_bilinear, and scaled by a learned mask value.
// One thread per element of (channels x batch_size x height_col x width_col);
// the grid-stride CUDA_KERNEL_LOOP covers any launch size.
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const float *data_im, const float *data_offset, const float *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
float *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
// Decompose the flat index into (w_col, h_col, b_col, c_im).
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
// First output row produced by this input channel.
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
// Top-left corner of the receptive field in input coordinates.
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
// Output pointer for tap (0,0); advanced by batch_size*height_col*width_col per tap below.
float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
// The offset tensor stores 2 values (dy, dx) per tap; the mask stores 1.
const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = static_cast<float>(0);
// Deformed (fractional) sampling location for this tap.
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
// '> -1' (rather than '>= 0') admits fractional coordinates in (-1, 0),
// whose in-range corner still contributes via the bilinear zero padding.
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
// Modulated sample; move to the next tap's output row.
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
// Host-side driver for modulated_deformable_im2col_gpu_kernel: launches one
// thread per element of (channels x batch_size x height_col x width_col) on
// 'stream'.  A launch error is printed but not propagated to the caller.
// NOTE(review): assumes channels is divisible by deformable_group -- confirm.
// NOTE(review): parameter name 'kenerl_w' (sic) is the kernel width.
void modulated_deformable_im2col_cuda(cudaStream_t stream,
const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
modulated_deformable_im2col_gpu_kernel
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
|
0cfed467d8fe2933b4ffb27b97b692855f7e8291.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#include <string>
#define NUM_THREADS 10000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
// Print 'size' ints from 'array' to stdout, formatted as "{ a b c }".
void print_array(int *array, int size)
{
    printf("{ ");
    int idx = 0;
    while (idx < size) {
        printf("%d ", array[idx]);
        ++idx;
    }
    printf("}\n");
}
// Demonstration kernel WITHOUT synchronization: NUM_THREADS threads all
// read-modify-write only ARRAY_SIZE distinct elements, so concurrent
// updates race and increments are lost (counts fall short of
// NUM_THREADS / ARRAY_SIZE).  Contrast with increment_atomic below.
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
// plain load-add-store: the intentional data race
g[i] = g[i] + 1;
}
// Same wrapping increment as increment_naive, but the read-modify-write is
// performed with atomicAdd, so every one of the NUM_THREADS increments is
// applied exactly once (each element accumulates NUM_THREADS / ARRAY_SIZE).
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
// Entry point of the naive-vs-atomic increment demo.
// argv[1] selects the kernel: "naive" (racy) or "atomic".
int main(int argc,char **argv)
{
    if (argc < 2) {
        // FIX: report the expected usage instead of exiting silently.
        printf("The argument must be either naive or atomic\n");
        return 1;
    }
    std::string mode = std::string(argv[1]);
    GpuTimer timer;
    printf("%d total threads in %d blocks writing into %d array elements\n",
        NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);

    // declare and allocate host memory
    int h_array[ARRAY_SIZE];
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);

    // declare, allocate, and zero out GPU memory
    int * d_array;
    hipMalloc((void **) &d_array, ARRAY_BYTES);
    hipMemset((void *) d_array, 0, ARRAY_BYTES);

    // Launch the selected kernel; only the kernel is inside the timed region.
    timer.Start();
    if (mode.compare("naive") == 0) {
        hipLaunchKernelGGL(( increment_naive), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
    }
    else if (mode.compare("atomic") == 0) {
        hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
    }
    else {
        printf("The argument must be either naive or atomic\n");
        hipFree(d_array); // FIX: release the device buffer on this error path too
        return 1;
    }
    timer.Stop();

    // copy back the array of sums from GPU and print
    hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
    print_array(h_array, ARRAY_SIZE);
    printf("Time elapsed = %g ms\n", timer.Elapsed());

    // free GPU memory allocation and exit
    hipFree(d_array);
    return 0;
} | 0cfed467d8fe2933b4ffb27b97b692855f7e8291.cu | #include <stdio.h>
#include "gputimer.h"
#include <string>
#define NUM_THREADS 10000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
// Print 'size' ints from 'array' to stdout, formatted as "{ a b c }".
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
// Demonstration kernel WITHOUT synchronization: NUM_THREADS threads all
// read-modify-write only ARRAY_SIZE distinct elements, so concurrent
// updates race and increments are lost (counts fall short of
// NUM_THREADS / ARRAY_SIZE).  Contrast with increment_atomic below.
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
// plain load-add-store: the intentional data race
g[i] = g[i] + 1;
}
// Same wrapping increment as increment_naive, but the read-modify-write is
// performed with atomicAdd, so every one of the NUM_THREADS increments is
// applied exactly once (each element accumulates NUM_THREADS / ARRAY_SIZE).
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
// Entry point of the naive-vs-atomic increment demo.
// argv[1] selects the kernel: "naive" (racy) or "atomic".
int main(int argc,char **argv)
{
// The mode argument is required; its value is validated further below.
if (argc < 2) return 1;
std::string mode = std::string(argv[1]);
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
// (only the kernel itself is inside the timed region)
timer.Start();
if (mode.compare("naive") == 0) {
increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
}
else if (mode.compare("atomic") == 0) {
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
}
else {
printf("The argument must be either naive or atomic\n");
// NOTE(review): d_array is not freed on this error path.
return 1;
}
timer.Stop();
// copy back the array of sums from GPU and print
// (the blocking cudaMemcpy also waits for the kernel to finish)
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
} |
099984deaa2731589190473d7693cb10e02e4fa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* wcsphColagrossiLandrini.cu
*
* Author: Kamil Szewc (kamil.szewc@gmail.com)
* Modified on: 27-09-2014
*
*/
#include <thrust/device_vector.h>
#include "../sph.h"
#include "../errlog.h"
#include "wcsphSzewcOlejnik.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/calcHydrostaticPressure/calcHydrostaticPressure.cuh"
#include "general/calcShearRate/calcShearRate.cuh"
#include "general/calcDispersedPhase/calcDispersedPhase.cuh"
#include "general/calcTurbulentViscosity/calcTurbulentViscosity.cuh"
#include "general/calcSingleSolidParticleAcceleration/calcSingleSolidParticleAcceleration.cuh"
#include "general/smoothingDensity/smoothingDensity.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
// Advances the WCSPH Szewc-Olejnik model by one time step. The sequence is:
// time-step estimation, hash/sort/reorder for neighbour search, optional
// density smoothing and hydrostatic pressure, equation-of-state pressure,
// optional turbulence/soil viscosity, pairwise interactions, optional
// dispersed-phase coupling, copy-back, and finally advection.
//
// NOB/TPB            - grid/block size used for the per-particle kernels
// pVector            - device particle array (unsorted ordering)
// pSort              - scratch buffer holding the cell-sorted particle copy
// gridParticleHash/Index, cellStart/cellEnd - uniform-grid search structures
// par/parHost        - simulation parameters on device / host
// NOTE(review): pOld and time are not referenced in this function body -
// confirm whether they are kept only for interface uniformity.
void modelWcsphSzewcOlejnik(int NOB, int TPB,
	thrust::device_vector<Particle>& pVector,
	Particle *pSort,
	ParticleBasic *pOld,
	uint *gridParticleHash,
	uint *gridParticleIndex,
	uint *cellStart,
	uint *cellEnd,
	thrust::device_vector<ParticleDispersedPhase>& pDispersedPhaseVector,
	Parameters *par,
	Parameters *parHost,
	real time)
{
	STARTLOG("logs/models.log");
	Particle* p = thrust::raw_pointer_cast(pVector.data());
	ParticleDispersedPhase* pDispersedPhase = thrust::raw_pointer_cast(pDispersedPhaseVector.data());
	calcTimeStep(pVector, par, parHost);
	// sort particles into grid cells, then gather into pSort (true = forward copy)
	hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
	copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par);
	// step counter persists across calls; used to trigger periodic density smoothing
	static int step = 1;
	if ((parHost->T_SMOOTHING_DENSITY != 0) && (step%parHost->T_SMOOTHING_DENSITY == 0))
	{
		smoothingDensity << <NOB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("smoothingDensity");
	}
	step++;
	if (parHost->T_HYDROSTATIC_PRESSURE !=0)
	{
		hipLaunchKernelGGL(( calcHydrostaticPressure) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcHydrostaticPressure");
	}
	// equation of state: pressure from density
	hipLaunchKernelGGL(( calcPressureSO) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
	HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureSO");
	// shear rate is required by both the turbulence and the soil models
	if ( (parHost->T_TURBULENCE != 0) || (parHost->T_SOIL != 0) )
	{
		hipLaunchKernelGGL(( calcShearRate) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcShearRate");
	}
	if (parHost->T_TURBULENCE != 0)
	{
		hipLaunchKernelGGL(( calcTurbulentViscosity) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcTurbulentViscosity");
	}
	if (parHost->T_SOIL != 0)
	{
		hipLaunchKernelGGL(( calcSoilViscositySO) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcSoilViscositySO");
	}
	// main pairwise SPH interaction (momentum/continuity)
	calcInteractionSO << <NOB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, par);
	HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionSO");
	// surface tension and XSPH hooks are currently empty placeholders
	if (parHost->T_SURFACE_TENSION != 0) {
	}
	if (parHost->T_XSPH != 0) {
	}
	if (parHost->T_DISPERSED_PHASE > 0)
	{
		// dispersed-phase kernels are sized by N_DISPERSED_PHASE, not N
		calcDispersedPhaseField << <(parHost->N_DISPERSED_PHASE + TPB - 1) / TPB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, pDispersedPhase, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDispersedPhaseField");
		calcDispersedPhaseAdvection << <(parHost->N_DISPERSED_PHASE + TPB - 1) / TPB, TPB >> >(pDispersedPhase, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDispersedPhaseAdvection");
	}
	// scatter results back into the original particle ordering (false = reverse copy)
	copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par);
	if (parHost->T_SOLID_PARTICLE != 0)
	{
		calcSingleSolidParticleAcceleration(NOB, TPB, pVector, par);
	}
	// integrate positions/velocities
	hipLaunchKernelGGL(( calcAdvectionSO) , dim3(NOB), dim3(TPB), 0, 0, p, par);
	HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionSO");
} | 099984deaa2731589190473d7693cb10e02e4fa6.cu | /*
* wcsphColagrossiLandrini.cu
*
* Author: Kamil Szewc (kamil.szewc@gmail.com)
* Modified on: 27-09-2014
*
*/
#include <thrust/device_vector.h>
#include "../sph.h"
#include "../errlog.h"
#include "wcsphSzewcOlejnik.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/calcHydrostaticPressure/calcHydrostaticPressure.cuh"
#include "general/calcShearRate/calcShearRate.cuh"
#include "general/calcDispersedPhase/calcDispersedPhase.cuh"
#include "general/calcTurbulentViscosity/calcTurbulentViscosity.cuh"
#include "general/calcSingleSolidParticleAcceleration/calcSingleSolidParticleAcceleration.cuh"
#include "general/smoothingDensity/smoothingDensity.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
// Advances the WCSPH Szewc-Olejnik model by one time step. The sequence is:
// time-step estimation, hash/sort/reorder for neighbour search, optional
// density smoothing and hydrostatic pressure, equation-of-state pressure,
// optional turbulence/soil viscosity, pairwise interactions, optional
// dispersed-phase coupling, copy-back, and finally advection.
//
// NOB/TPB            - grid/block size used for the per-particle kernels
// pVector            - device particle array (unsorted ordering)
// pSort              - scratch buffer holding the cell-sorted particle copy
// gridParticleHash/Index, cellStart/cellEnd - uniform-grid search structures
// par/parHost        - simulation parameters on device / host
// NOTE(review): pOld and time are not referenced in this function body -
// confirm whether they are kept only for interface uniformity.
void modelWcsphSzewcOlejnik(int NOB, int TPB,
	thrust::device_vector<Particle>& pVector,
	Particle *pSort,
	ParticleBasic *pOld,
	uint *gridParticleHash,
	uint *gridParticleIndex,
	uint *cellStart,
	uint *cellEnd,
	thrust::device_vector<ParticleDispersedPhase>& pDispersedPhaseVector,
	Parameters *par,
	Parameters *parHost,
	real time)
{
	STARTLOG("logs/models.log");
	Particle* p = thrust::raw_pointer_cast(pVector.data());
	ParticleDispersedPhase* pDispersedPhase = thrust::raw_pointer_cast(pDispersedPhaseVector.data());
	calcTimeStep(pVector, par, parHost);
	// sort particles into grid cells, then gather into pSort (true = forward copy)
	hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
	copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par);
	// step counter persists across calls; used to trigger periodic density smoothing
	static int step = 1;
	if ((parHost->T_SMOOTHING_DENSITY != 0) && (step%parHost->T_SMOOTHING_DENSITY == 0))
	{
		smoothingDensity << <NOB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("smoothingDensity");
	}
	step++;
	if (parHost->T_HYDROSTATIC_PRESSURE !=0)
	{
		calcHydrostaticPressure <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcHydrostaticPressure");
	}
	// equation of state: pressure from density
	calcPressureSO <<<NOB, TPB>>>(pSort, par);
	HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureSO");
	// shear rate is required by both the turbulence and the soil models
	if ( (parHost->T_TURBULENCE != 0) || (parHost->T_SOIL != 0) )
	{
		calcShearRate <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcShearRate");
	}
	if (parHost->T_TURBULENCE != 0)
	{
		calcTurbulentViscosity <<<NOB, TPB>>>(pSort, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcTurbulentViscosity");
	}
	if (parHost->T_SOIL != 0)
	{
		calcSoilViscositySO <<<NOB, TPB>>>(pSort, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcSoilViscositySO");
	}
	// main pairwise SPH interaction (momentum/continuity)
	calcInteractionSO << <NOB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, par);
	HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionSO");
	// surface tension and XSPH hooks are currently empty placeholders
	if (parHost->T_SURFACE_TENSION != 0) {
	}
	if (parHost->T_XSPH != 0) {
	}
	if (parHost->T_DISPERSED_PHASE > 0)
	{
		// dispersed-phase kernels are sized by N_DISPERSED_PHASE, not N
		calcDispersedPhaseField << <(parHost->N_DISPERSED_PHASE + TPB - 1) / TPB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, pDispersedPhase, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDispersedPhaseField");
		calcDispersedPhaseAdvection << <(parHost->N_DISPERSED_PHASE + TPB - 1) / TPB, TPB >> >(pDispersedPhase, par);
		HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDispersedPhaseAdvection");
	}
	// scatter results back into the original particle ordering (false = reverse copy)
	copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par);
	if (parHost->T_SOLID_PARTICLE != 0)
	{
		calcSingleSolidParticleAcceleration(NOB, TPB, pVector, par);
	}
	// integrate positions/velocities
	calcAdvectionSO <<<NOB, TPB>>>(p, par);
	HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionSO");
}
466e4f01ced4ee2db8f11a6e7c70e73b86a8ab98.hip | // !!! This is a file automatically generated by hipify!!!
/*
Matt Dean - 1422434 - mxd434
Goals implemented:
- Block scan for arbitrary length small vectors - 'blockscan' function
- Full scan for arbitrary length large vectors - 'scan' function
This function decides whether to perform a small (one block) scan or a full (n-level) scan depending on the length of the input vector
- BCAO for both scans
Hardware:
CPU - Intel Core i5-4670k @ 3.4GHz
GPU - NVIDIA GeForce GTX 760
Timings:
10,000,000 Elements
host : 20749 ms
gpu : 7.860768 ms
gpu bcao : 4.304064 ms
For more results please see the comment at the bottom of this file
Extra work:
Due to the recursive nature of the full scan it can handle n > 3 levels
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
// scan.cuh
long sequential_scan(int* output, int* input, int length);
float blockscan(int *output, int *input, int length, bool bcao);
float scan(int *output, int *input, int length, bool bcao);
void scanLargeDeviceArray(int *output, int *input, int length, bool bcao);
void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao);
void scanLargeEvenDeviceArray(int *output, int *input, int length, bool bcao);
// kernels.cuh
__global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo);
__global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo);
__global__ void prescan_large(int *output, int *input, int n, int* sums);
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums);
__global__ void add(int *output, int length, int *n1);
__global__ void add(int *output, int length, int *n1, int *n2);
// utils.h
void _checkCudaError(const char *message, hipError_t err, const char *caller);
void printResult(const char* prefix, int result, long nanoseconds);
void printResult(const char* prefix, int result, float milliseconds);
bool isPowerOfTwo(int x);
int nextPowerOfTwo(int x);
long get_nanos();
/*///////////////////////////////////*/
/* Main.cpp */
/*///////////////////////////////////*/
// Runs every scan variant on N random ints (values 0..9) and prints the
// last scanned element together with the time taken for each variant.
// The single-block variants run only when N fits in one block (N <= 1024).
void test(int N) {
	const bool fitsInOneBlock = (N <= 1024);

	time_t seed;
	srand((unsigned)time(&seed));

	int *input = new int[N];
	for (int i = 0; i < N; ++i) input[i] = rand() % 10;

	printf("%i Elements \n", N);

	// CPU reference scan
	int *cpuOut = new int[N]();
	long cpuTime = sequential_scan(cpuOut, input, N);
	printResult("host ", cpuOut[N - 1], cpuTime);

	// full GPU scan, plain and bank-conflict-avoidance-optimised
	int *gpuOut = new int[N]();
	float gpuTime = scan(gpuOut, input, N, false);
	printResult("gpu ", gpuOut[N - 1], gpuTime);

	int *gpuOutBcao = new int[N]();
	float gpuTimeBcao = scan(gpuOutBcao, input, N, true);
	printResult("gpu bcao", gpuOutBcao[N - 1], gpuTimeBcao);

	if (fitsInOneBlock) {
		// single-block scan, plain and BCAO
		int *blockOut = new int[N]();
		float blockTime = blockscan(blockOut, input, N, false);
		printResult("level 1 ", blockOut[N - 1], blockTime);

		int *blockOutBcao = new int[N]();
		float blockTimeBcao = blockscan(blockOutBcao, input, N, true);
		printResult("l1 bcao ", blockOutBcao[N - 1], blockTimeBcao);

		delete[] blockOut;
		delete[] blockOutBcao;
	}

	printf("\n");

	delete[] input;
	delete[] cpuOut;
	delete[] gpuOut;
	delete[] gpuOutBcao;
}
// Benchmarks every scan variant across a range of input sizes,
// from 20 million elements down to 5.
int main()
{
	const int sizes[] = {
		20000000, 10000000, 1000000, 10000,
		5000, 4096, 2048, 2000, 1000, 500, 100, 64, 8, 5
	};

	for (int size : sizes) {
		test(size);
	}

	return 0;
}
/*///////////////////////////////////*/
/* scan.cu */
/*///////////////////////////////////*/
// Convenience wrapper that records the calling function's name.
#define checkCudaError(o, l) _checkCudaError(o, l, __func__)
// Launch configuration: each thread scans two elements, so one block
// covers ELEMENTS_PER_BLOCK = 1024 values.
int THREADS_PER_BLOCK = 512;
int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2;
// CPU reference: exclusive (pre-)scan of `input` into `output`.
// Returns the elapsed wall-clock time in nanoseconds.
long sequential_scan(int* output, int* input, int length) {
	long t0 = get_nanos();

	output[0] = 0; // exclusive scan: the first element is the identity
	for (int i = 1; i < length; ++i) {
		output[i] = output[i - 1] + input[i - 1];
	}

	return get_nanos() - t0;
}
// Exclusive scan of `input` into `output` using a single GPU block.
// `length` must fit one block (<= 1024: one thread handles two elements).
// `bcao` selects the bank-conflict-avoidance-optimised kernel.
// Returns the kernel execution time in milliseconds (measured with events).
float blockscan(int *output, int *input, int length, bool bcao) {
	int *d_out, *d_in;
	const int arraySize = length * sizeof(int);
	hipMalloc((void **)&d_out, arraySize);
	hipMalloc((void **)&d_in, arraySize);
	hipMemcpy(d_out, output, arraySize, hipMemcpyHostToDevice);
	hipMemcpy(d_in, input, arraySize, hipMemcpyHostToDevice);
	// start timer
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start);
	int powerOfTwo = nextPowerOfTwo(length);
	// shared memory holds 2 * powerOfTwo ints (data plus BCAO padding headroom)
	if (bcao) {
		hipLaunchKernelGGL(( prescan_arbitrary), dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), 0, d_out, d_in, length, powerOfTwo);
	}
	else {
		hipLaunchKernelGGL(( prescan_arbitrary_unoptimized), dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), 0, d_out, d_in, length, powerOfTwo);
	}
	// end timer; synchronize so the elapsed time covers the whole kernel
	hipEventRecord(stop);
	hipEventSynchronize(stop);
	float elapsedTime = 0;
	hipEventElapsedTime(&elapsedTime, start, stop);
	hipMemcpy(output, d_out, arraySize, hipMemcpyDeviceToHost);
	hipFree(d_out);
	hipFree(d_in);
	hipEventDestroy(start);
	hipEventDestroy(stop);
	return elapsedTime;
}
// Exclusive scan of `input` into `output` for arbitrary length: dispatches
// to the multi-block path when the data exceeds one block's capacity,
// otherwise to the single-block path. `bcao` selects the BCAO kernels.
// Returns the device execution time in milliseconds (event-timed).
float scan(int *output, int *input, int length, bool bcao) {
	int *d_out, *d_in;
	const int arraySize = length * sizeof(int);
	hipMalloc((void **)&d_out, arraySize);
	hipMalloc((void **)&d_in, arraySize);
	hipMemcpy(d_out, output, arraySize, hipMemcpyHostToDevice);
	hipMemcpy(d_in, input, arraySize, hipMemcpyHostToDevice);
	// start timer
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start);
	if (length > ELEMENTS_PER_BLOCK) {
		scanLargeDeviceArray(d_out, d_in, length, bcao);
	}
	else {
		scanSmallDeviceArray(d_out, d_in, length, bcao);
	}
	// end timer; synchronize so the elapsed time covers all launched kernels
	hipEventRecord(stop);
	hipEventSynchronize(stop);
	float elapsedTime = 0;
	hipEventElapsedTime(&elapsedTime, start, stop);
	hipMemcpy(output, d_out, arraySize, hipMemcpyDeviceToHost);
	hipFree(d_out);
	hipFree(d_in);
	hipEventDestroy(start);
	hipEventDestroy(stop);
	return elapsedTime;
}
// Scans a device array of arbitrary length: the largest multiple of
// ELEMENTS_PER_BLOCK goes through the multi-block path; any tail is
// scanned in a single block and then shifted by the inclusive total of
// the even-sized prefix.
void scanLargeDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
	int remainder = length % ELEMENTS_PER_BLOCK;
	if (remainder == 0) {
		scanLargeEvenDeviceArray(d_out, d_in, length, bcao);
		return;
	}

	// scan the even-sized prefix, then the tail
	int evenLength = length - remainder;
	scanLargeEvenDeviceArray(d_out, d_in, evenLength, bcao);

	int *tailOut = d_out + evenLength;
	scanSmallDeviceArray(tailOut, d_in + evenLength, remainder, bcao);

	// add the (inclusive) last element of the prefix to every tail element
	hipLaunchKernelGGL(( add), dim3(1), dim3(remainder), 0, 0, tailOut, remainder, d_in + evenLength - 1, d_out + evenLength - 1);
}
// Scans a device array that fits in one block (length <= ELEMENTS_PER_BLOCK).
// One thread processes two elements; shared memory is sized for the next
// power of two so the tree-based scan always operates on a full tree.
void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
	int powerOfTwo = nextPowerOfTwo(length);
	int threads = (length + 1) / 2;
	size_t smem = 2 * powerOfTwo * sizeof(int);

	if (bcao) {
		hipLaunchKernelGGL(( prescan_arbitrary), dim3(1), dim3(threads), smem, 0, d_out, d_in, length, powerOfTwo);
	}
	else {
		hipLaunchKernelGGL(( prescan_arbitrary_unoptimized), dim3(1), dim3(threads), smem, 0, d_out, d_in, length, powerOfTwo);
	}
}
// Scans a device array whose length is an exact multiple of
// ELEMENTS_PER_BLOCK: each block scans its own segment and writes its
// total to d_sums; the scanned d_sums (d_incr) is then added back to every
// segment. Recurses via scanLargeDeviceArray when the sums array itself
// exceeds one block.
void scanLargeEvenDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
	const int blocks = length / ELEMENTS_PER_BLOCK;
	const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int);
	// per-block totals and their exclusive scan
	int *d_sums, *d_incr;
	hipMalloc((void **)&d_sums, blocks * sizeof(int));
	hipMalloc((void **)&d_incr, blocks * sizeof(int));
	if (bcao) {
		hipLaunchKernelGGL(( prescan_large), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedMemArraySize, 0, d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
	}
	else {
		hipLaunchKernelGGL(( prescan_large_unoptimized), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedMemArraySize, 0, d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
	}
	// one thread scans two sums, so (blocks+1)/2 threads are needed
	const int sumsArrThreadsNeeded = (blocks + 1) / 2;
	if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) {
		// perform a large scan on the sums arr
		scanLargeDeviceArray(d_incr, d_sums, blocks, bcao);
	}
	else {
		// only need one block to scan sums arr so can use small scan
		scanSmallDeviceArray(d_incr, d_sums, blocks, bcao);
	}
	// add each block's scanned offset to its segment
	hipLaunchKernelGGL(( add), dim3(blocks), dim3(ELEMENTS_PER_BLOCK), 0, 0, d_out, ELEMENTS_PER_BLOCK, d_incr);
	hipFree(d_sums);
	hipFree(d_incr);
}
/*///////////////////////////////////*/
/* kernels.cu */
/*///////////////////////////////////*/
#define SHARED_MEMORY_BANKS 32
#define LOG_MEM_BANKS 5
// There were two BCAO optimisations in the paper - this one is fastest
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS)
// Single-block Blelloch exclusive scan with bank-conflict avoidance (BCAO).
// Launch: 1 block of (n+1)/2 threads, 2*powerOfTwo*sizeof(int) shared mem.
// powerOfTwo is the next power of two >= n; positions beyond n are
// zero-padded so the up/down-sweep trees are complete.
__global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo)
{
	extern __shared__ int temp[];// allocated on invocation
	int threadID = threadIdx.x;
	// each thread loads/stores two elements: ai in the lower half, bi in the upper
	int ai = threadID;
	int bi = threadID + (n / 2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
	if (threadID < n) {
		temp[ai + bankOffsetA] = input[ai];
		temp[bi + bankOffsetB] = input[bi];
	}
	else {
		// zero padding up to the power-of-two tree size
		temp[ai + bankOffsetA] = 0;
		temp[bi + bankOffsetB] = 0;
	}
	int offset = 1;
	for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	if (threadID == 0) {
		temp[powerOfTwo - 1 + CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0; // clear the last element
	}
	for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			// swap-and-accumulate step of the down-sweep
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	// write results back with the same guard used on load
	if (threadID < n) {
		output[ai] = temp[ai + bankOffsetA];
		output[bi] = temp[bi + bankOffsetB];
	}
}
// Single-block Blelloch exclusive scan WITHOUT bank-conflict avoidance.
// Launch: 1 block of (n+1)/2 threads, 2*powerOfTwo*sizeof(int) shared mem;
// each thread handles two consecutive elements.
__global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo) {
	extern __shared__ int temp[];// allocated on invocation
	int threadID = threadIdx.x;
	if (threadID < n) {
		temp[2 * threadID] = input[2 * threadID]; // load input into shared memory
		temp[2 * threadID + 1] = input[2 * threadID + 1];
	}
	else {
		// zero padding up to the power-of-two tree size
		temp[2 * threadID] = 0;
		temp[2 * threadID + 1] = 0;
	}
	int offset = 1;
	for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	if (threadID == 0) { temp[powerOfTwo - 1] = 0; } // clear the last element
	for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			// swap-and-accumulate step of the down-sweep
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	if (threadID < n) {
		output[2 * threadID] = temp[2 * threadID]; // write results to device memory
		output[2 * threadID + 1] = temp[2 * threadID + 1];
	}
}
// Multi-block Blelloch exclusive scan with BCAO: each block scans its own
// n-element segment (n = elements per block, a power of two) and stores the
// segment's total in sums[blockID] for a later second-level scan.
// Launch: one block per segment, n/2 threads, 2*n*sizeof(int) shared mem.
__global__ void prescan_large(int *output, int *input, int n, int *sums) {
	extern __shared__ int temp[];
	int blockID = blockIdx.x;
	int threadID = threadIdx.x;
	int blockOffset = blockID * n;
	// each thread loads two elements: ai from the lower half, bi from the upper
	int ai = threadID;
	int bi = threadID + (n / 2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
	temp[ai + bankOffsetA] = input[blockOffset + ai];
	temp[bi + bankOffsetB] = input[blockOffset + bi];
	int offset = 1;
	for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	if (threadID == 0) {
		// save this block's total before clearing the root for the down-sweep
		sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
		temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
	}
	for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	output[blockOffset + ai] = temp[ai + bankOffsetA];
	output[blockOffset + bi] = temp[bi + bankOffsetB];
}
// Multi-block Blelloch exclusive scan WITHOUT bank-conflict avoidance:
// each block scans its own n-element segment (n a power of two) and stores
// the segment total in sums[blockID] for a later second-level scan.
// Launch: one block per segment, n/2 threads, 2*n*sizeof(int) shared mem.
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums) {
	int blockID = blockIdx.x;
	int threadID = threadIdx.x;
	int blockOffset = blockID * n;
	extern __shared__ int temp[];
	// each thread loads two consecutive elements of its block's segment
	temp[2 * threadID] = input[blockOffset + (2 * threadID)];
	temp[2 * threadID + 1] = input[blockOffset + (2 * threadID) + 1];
	int offset = 1;
	for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	if (threadID == 0) {
		// save this block's total before clearing the root for the down-sweep
		sums[blockID] = temp[n - 1];
		temp[n - 1] = 0;
	}
	for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	output[blockOffset + (2 * threadID)] = temp[2 * threadID];
	output[blockOffset + (2 * threadID) + 1] = temp[2 * threadID + 1];
}
// Adds n[blockIdx.x] to every element of this block's `length`-sized
// segment of `output` (used to propagate per-block scan offsets).
__global__ void add(int *output, int length, int *n) {
	int segmentStart = blockIdx.x * length;
	output[segmentStart + threadIdx.x] += n[blockIdx.x];
}
// Adds n1[blockIdx.x] + n2[blockIdx.x] to every element of this block's
// segment of `output` (tail fix-up: last input element + last scanned value).
__global__ void add(int *output, int length, int *n1, int *n2) {
	int segmentStart = blockIdx.x * length;
	output[segmentStart + threadIdx.x] += n1[blockIdx.x] + n2[blockIdx.x];
}
/*///////////////////////////////////*/
/* utils.cpp */
/*///////////////////////////////////*/
// Prints a diagnostic (caller name, user message, and the runtime's error
// string) to stderr and terminates the process when `err` indicates failure.
void _checkCudaError(const char *message, hipError_t err, const char *caller) {
	if (err != hipSuccess) {
		fprintf(stderr, "Error in: %s\n", caller);
		// print the message through "%s" so '%' characters in it are not
		// misinterpreted as format specifiers (the old fprintf(stderr, message)
		// was a format-string bug)
		fprintf(stderr, "%s", message);
		fprintf(stderr, ": %s\n", hipGetErrorString(err));
		// was exit(0): a fatal error must not report success to the shell
		exit(EXIT_FAILURE);
	}
}
// Prints "<prefix> : <result> in <time> ms" for a CPU timing measured in
// nanoseconds.
void printResult(const char* prefix, int result, long nanoseconds) {
	printf(" ");
	// print through "%s" so '%' in the prefix cannot act as a format specifier
	printf("%s", prefix);
	// bug fix: nanoseconds / 1000 is microseconds; divide by 1,000,000
	// so the printed value really is milliseconds as the label claims
	printf(" : %i in %ld ms \n", result, nanoseconds / 1000000);
}
// Prints "<prefix> : <result> in <time> ms" for a GPU timing already
// expressed in milliseconds.
void printResult(const char* prefix, int result, float milliseconds) {
	printf(" ");
	// print through "%s" so '%' in the prefix cannot act as a format specifier
	printf("%s", prefix);
	printf(" : %i in %f ms \n", result, milliseconds);
}
// from https://stackoverflow.com/a/3638454
// True iff x is a non-zero value with exactly one bit set
// (from https://stackoverflow.com/a/3638454).
bool isPowerOfTwo(int x) {
	return (x != 0) && ((x & (x - 1)) == 0);
}
// from https://stackoverflow.com/a/12506181
// Smallest power of two >= x; returns 1 for any x <= 1
// (from https://stackoverflow.com/a/12506181).
int nextPowerOfTwo(int x) {
	int p = 1;
	for (; p < x; p <<= 1) {
	}
	return p;
}
// from https://stackoverflow.com/a/36095407
// Get the current time in nanoseconds
// Current wall-clock time in nanoseconds since the epoch
// (from https://stackoverflow.com/a/36095407).
long get_nanos() {
	struct timespec now;
	timespec_get(&now, TIME_UTC);
	return 1000000000L * (long)now.tv_sec + (long)now.tv_nsec;
}
/*
Timings
'level 1' = blockscan
'l1 bcao' = blockscan with bcao
The number before the time is the final element of the scanned array
20000000 Elements
host : 89997032 in 42338 ms
gpu : 89997032 in 16.285631 ms
gpu bcao : 89997032 in 8.554880 ms
10000000 Elements
host : 44983528 in 20749 ms
gpu : 44983528 in 7.860768 ms
gpu bcao : 44983528 in 4.304064 ms
1000000 Elements
host : 4494474 in 2105 ms
gpu : 4494474 in 0.975648 ms
gpu bcao : 4494474 in 0.600416 ms
10000 Elements
host : 45078 in 19 ms
gpu : 45078 in 0.213760 ms
gpu bcao : 45078 in 0.192128 ms
5000 Elements
host : 22489 in 11 ms
gpu : 22489 in 0.169312 ms
gpu bcao : 22489 in 0.148832 ms
4096 Elements
host : 18294 in 9 ms
gpu : 18294 in 0.132672 ms
gpu bcao : 18294 in 0.128480 ms
2048 Elements
host : 9149 in 4 ms
gpu : 9149 in 0.140736 ms
gpu bcao : 9149 in 0.126944 ms
2000 Elements
host : 8958 in 3 ms
gpu : 8958 in 0.178912 ms
gpu bcao : 8958 in 0.214464 ms
1000 Elements
host : 4483 in 2 ms
gpu : 4483 in 0.020128 ms
gpu bcao : 4483 in 0.010784 ms
level 1 : 4483 in 0.018080 ms
l1 bcao : 4483 in 0.010400 ms
500 Elements
host : 2203 in 4 ms
gpu : 2203 in 0.013440 ms
gpu bcao : 2203 in 0.009664 ms
level 1 : 2203 in 0.013280 ms
l1 bcao : 2203 in 0.010176 ms
100 Elements
host : 356 in 0 ms
gpu : 356 in 0.008512 ms
gpu bcao : 356 in 0.009280 ms
level 1 : 356 in 0.008896 ms
l1 bcao : 356 in 0.009056 ms
64 Elements
host : 221 in 0 ms
gpu : 221 in 0.007584 ms
gpu bcao : 221 in 0.008960 ms
level 1 : 221 in 0.007360 ms
l1 bcao : 221 in 0.008352 ms
8 Elements
host : 24 in 0 ms
gpu : 24 in 0.006240 ms
gpu bcao : 24 in 0.007392 ms
level 1 : 24 in 0.006176 ms
l1 bcao : 24 in 0.007424 ms
5 Elements
host : 12 in 0 ms
gpu : 12 in 0.006144 ms
gpu bcao : 12 in 0.007296 ms
level 1 : 12 in 0.006048 ms
l1 bcao : 12 in 0.007328 ms
*/ | 466e4f01ced4ee2db8f11a6e7c70e73b86a8ab98.cu | /*
Matt Dean - 1422434 - mxd434
Goals implemented:
- Block scan for arbitrary length small vectors - 'blockscan' function
- Full scan for arbitrary length large vectors - 'scan' function
This function decides whether to perform a small (one block) scan or a full (n-level) scan depending on the length of the input vector
- BCAO for both scans
Hardware:
CPU - Intel Core i5-4670k @ 3.4GHz
GPU - NVIDIA GeForce GTX 760
Timings:
10,000,000 Elements
host : 20749 ms
gpu : 7.860768 ms
gpu bcao : 4.304064 ms
For more results please see the comment at the bottom of this file
Extra work:
Due to the recursive nature of the full scan it can handle n > 3 levels
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
// scan.cuh
long sequential_scan(int* output, int* input, int length);
float blockscan(int *output, int *input, int length, bool bcao);
float scan(int *output, int *input, int length, bool bcao);
void scanLargeDeviceArray(int *output, int *input, int length, bool bcao);
void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao);
void scanLargeEvenDeviceArray(int *output, int *input, int length, bool bcao);
// kernels.cuh
__global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo);
__global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo);
__global__ void prescan_large(int *output, int *input, int n, int* sums);
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums);
__global__ void add(int *output, int length, int *n1);
__global__ void add(int *output, int length, int *n1, int *n2);
// utils.h
void _checkCudaError(const char *message, cudaError_t err, const char *caller);
void printResult(const char* prefix, int result, long nanoseconds);
void printResult(const char* prefix, int result, float milliseconds);
bool isPowerOfTwo(int x);
int nextPowerOfTwo(int x);
long get_nanos();
/*///////////////////////////////////*/
/* Main.cpp */
/*///////////////////////////////////*/
// Runs every scan variant on N random ints (values 0..9) and prints the
// last scanned element together with the time taken for each variant.
// The single-block variants run only when N fits in one block (N <= 1024).
void test(int N) {
	const bool fitsInOneBlock = (N <= 1024);

	time_t seed;
	srand((unsigned)time(&seed));

	int *input = new int[N];
	for (int i = 0; i < N; ++i) input[i] = rand() % 10;

	printf("%i Elements \n", N);

	// CPU reference scan
	int *cpuOut = new int[N]();
	long cpuTime = sequential_scan(cpuOut, input, N);
	printResult("host ", cpuOut[N - 1], cpuTime);

	// full GPU scan, plain and bank-conflict-avoidance-optimised
	int *gpuOut = new int[N]();
	float gpuTime = scan(gpuOut, input, N, false);
	printResult("gpu ", gpuOut[N - 1], gpuTime);

	int *gpuOutBcao = new int[N]();
	float gpuTimeBcao = scan(gpuOutBcao, input, N, true);
	printResult("gpu bcao", gpuOutBcao[N - 1], gpuTimeBcao);

	if (fitsInOneBlock) {
		// single-block scan, plain and BCAO
		int *blockOut = new int[N]();
		float blockTime = blockscan(blockOut, input, N, false);
		printResult("level 1 ", blockOut[N - 1], blockTime);

		int *blockOutBcao = new int[N]();
		float blockTimeBcao = blockscan(blockOutBcao, input, N, true);
		printResult("l1 bcao ", blockOutBcao[N - 1], blockTimeBcao);

		delete[] blockOut;
		delete[] blockOutBcao;
	}

	printf("\n");

	delete[] input;
	delete[] cpuOut;
	delete[] gpuOut;
	delete[] gpuOutBcao;
}
// Benchmarks every scan variant across a range of input sizes,
// from 20 million elements down to 5.
int main()
{
	const int sizes[] = {
		20000000, 10000000, 1000000, 10000,
		5000, 4096, 2048, 2000, 1000, 500, 100, 64, 8, 5
	};

	for (int size : sizes) {
		test(size);
	}

	return 0;
}
/*///////////////////////////////////*/
/* scan.cu */
/*///////////////////////////////////*/
// Convenience wrapper that records the calling function's name.
#define checkCudaError(o, l) _checkCudaError(o, l, __func__)
// Launch configuration: each thread scans two elements, so one block
// covers ELEMENTS_PER_BLOCK = 1024 values.
int THREADS_PER_BLOCK = 512;
int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2;
// CPU reference: exclusive (pre-)scan of `input` into `output`.
// Returns the elapsed wall-clock time in nanoseconds.
long sequential_scan(int* output, int* input, int length) {
	long begin = get_nanos();

	output[0] = 0; // exclusive scan: the first element is the identity
	for (int i = 1; i < length; ++i) {
		output[i] = output[i - 1] + input[i - 1];
	}

	return get_nanos() - begin;
}
// Exclusive scan of `input` into `output` using a single GPU block.
// `length` must fit one block (<= 1024: one thread handles two elements).
// `bcao` selects the bank-conflict-avoidance-optimised kernel.
// Returns the kernel execution time in milliseconds (measured with events).
float blockscan(int *output, int *input, int length, bool bcao) {
	int *d_out, *d_in;
	const int arraySize = length * sizeof(int);
	cudaMalloc((void **)&d_out, arraySize);
	cudaMalloc((void **)&d_in, arraySize);
	cudaMemcpy(d_out, output, arraySize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_in, input, arraySize, cudaMemcpyHostToDevice);
	// start timer
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	int powerOfTwo = nextPowerOfTwo(length);
	// shared memory holds 2 * powerOfTwo ints (data plus BCAO padding headroom)
	if (bcao) {
		prescan_arbitrary<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo);
	}
	else {
		prescan_arbitrary_unoptimized<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo);
	}
	// end timer; synchronize so the elapsed time covers the whole kernel
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	float elapsedTime = 0;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cudaMemcpy(output, d_out, arraySize, cudaMemcpyDeviceToHost);
	cudaFree(d_out);
	cudaFree(d_in);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return elapsedTime;
}
// Exclusive scan of `input` into `output` for arbitrary length: dispatches
// to the multi-block path when the data exceeds one block's capacity,
// otherwise to the single-block path. `bcao` selects the BCAO kernels.
// Returns the device execution time in milliseconds (event-timed).
float scan(int *output, int *input, int length, bool bcao) {
	int *d_out, *d_in;
	const int arraySize = length * sizeof(int);
	cudaMalloc((void **)&d_out, arraySize);
	cudaMalloc((void **)&d_in, arraySize);
	cudaMemcpy(d_out, output, arraySize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_in, input, arraySize, cudaMemcpyHostToDevice);
	// start timer
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	if (length > ELEMENTS_PER_BLOCK) {
		scanLargeDeviceArray(d_out, d_in, length, bcao);
	}
	else {
		scanSmallDeviceArray(d_out, d_in, length, bcao);
	}
	// end timer; synchronize so the elapsed time covers all launched kernels
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	float elapsedTime = 0;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cudaMemcpy(output, d_out, arraySize, cudaMemcpyDeviceToHost);
	cudaFree(d_out);
	cudaFree(d_in);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return elapsedTime;
}
// Scans a device array of arbitrary length: the largest multiple of
// ELEMENTS_PER_BLOCK goes through the multi-block path; any tail is
// scanned in a single block and then shifted by the inclusive total of
// the even-sized prefix.
void scanLargeDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
	int remainder = length % ELEMENTS_PER_BLOCK;
	if (remainder == 0) {
		scanLargeEvenDeviceArray(d_out, d_in, length, bcao);
		return;
	}

	// scan the even-sized prefix, then the tail
	int evenLength = length - remainder;
	scanLargeEvenDeviceArray(d_out, d_in, evenLength, bcao);

	int *tailOut = d_out + evenLength;
	scanSmallDeviceArray(tailOut, d_in + evenLength, remainder, bcao);

	// add the (inclusive) last element of the prefix to every tail element
	add<<<1, remainder>>>(tailOut, remainder, d_in + evenLength - 1, d_out + evenLength - 1);
}
// Scans a device array that fits in one block (length <= ELEMENTS_PER_BLOCK).
// One thread processes two elements; shared memory is sized for the next
// power of two so the tree-based scan always operates on a full tree.
void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
	int powerOfTwo = nextPowerOfTwo(length);
	int threads = (length + 1) / 2;
	size_t smem = 2 * powerOfTwo * sizeof(int);

	if (bcao) {
		prescan_arbitrary<<<1, threads, smem>>>(d_out, d_in, length, powerOfTwo);
	}
	else {
		prescan_arbitrary_unoptimized<<<1, threads, smem>>>(d_out, d_in, length, powerOfTwo);
	}
}
// Scans a device array whose length is an exact multiple of
// ELEMENTS_PER_BLOCK: each block scans its own segment and writes its
// total to d_sums; the scanned d_sums (d_incr) is then added back to every
// segment. Recurses via scanLargeDeviceArray when the sums array itself
// exceeds one block.
void scanLargeEvenDeviceArray(int *d_out, int *d_in, int length, bool bcao) {
	const int blocks = length / ELEMENTS_PER_BLOCK;
	const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int);
	// per-block totals and their exclusive scan
	int *d_sums, *d_incr;
	cudaMalloc((void **)&d_sums, blocks * sizeof(int));
	cudaMalloc((void **)&d_incr, blocks * sizeof(int));
	if (bcao) {
		prescan_large<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize>>>(d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
	}
	else {
		prescan_large_unoptimized<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize>>>(d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
	}
	// one thread scans two sums, so (blocks+1)/2 threads are needed
	const int sumsArrThreadsNeeded = (blocks + 1) / 2;
	if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) {
		// perform a large scan on the sums arr
		scanLargeDeviceArray(d_incr, d_sums, blocks, bcao);
	}
	else {
		// only need one block to scan sums arr so can use small scan
		scanSmallDeviceArray(d_incr, d_sums, blocks, bcao);
	}
	// add each block's scanned offset to its segment
	add<<<blocks, ELEMENTS_PER_BLOCK>>>(d_out, ELEMENTS_PER_BLOCK, d_incr);
	cudaFree(d_sums);
	cudaFree(d_incr);
}
/*///////////////////////////////////*/
/* kernels.cu */
/*///////////////////////////////////*/
#define SHARED_MEMORY_BANKS 32
#define LOG_MEM_BANKS 5
// There were two BCAO optimisations in the paper - this one is fastest
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS)
// Single-block Blelloch exclusive scan with bank-conflict avoidance (BCAO).
// Launch: 1 block of (n+1)/2 threads, 2*powerOfTwo*sizeof(int) shared mem.
// powerOfTwo is the next power of two >= n; positions beyond n are
// zero-padded so the up/down-sweep trees are complete.
__global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo)
{
	extern __shared__ int temp[];// allocated on invocation
	int threadID = threadIdx.x;
	// each thread loads/stores two elements: ai in the lower half, bi in the upper
	int ai = threadID;
	int bi = threadID + (n / 2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
	if (threadID < n) {
		temp[ai + bankOffsetA] = input[ai];
		temp[bi + bankOffsetB] = input[bi];
	}
	else {
		// zero padding up to the power-of-two tree size
		temp[ai + bankOffsetA] = 0;
		temp[bi + bankOffsetB] = 0;
	}
	int offset = 1;
	for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	if (threadID == 0) {
		temp[powerOfTwo - 1 + CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0; // clear the last element
	}
	for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			// swap-and-accumulate step of the down-sweep
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	// write results back with the same guard used on load
	if (threadID < n) {
		output[ai] = temp[ai + bankOffsetA];
		output[bi] = temp[bi + bankOffsetB];
	}
}
// Exclusive (Blelloch) scan of one block of `n` ints WITHOUT bank-conflict
// padding -- the "unoptimized" baseline of prescan_arbitrary. `powerOfTwo`
// is presumably n rounded up to a power of two (callers outside this view,
// TODO confirm). Each thread handles the consecutive pair (2t, 2t+1);
// out-of-range slots are zero-filled so the tree walk stays valid.
__global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo) {
	extern __shared__ int temp[];// allocated on invocation
	int threadID = threadIdx.x;
	// NOTE(review): the guard compares threadID against n, but each thread
	// reads elements 2t and 2t+1, so for t just below n this may read past
	// the end of input when n < powerOfTwo; verify against the launch config.
	if (threadID < n) {
		temp[2 * threadID] = input[2 * threadID]; // load input into shared memory
		temp[2 * threadID + 1] = input[2 * threadID + 1];
	}
	else {
		temp[2 * threadID] = 0;
		temp[2 * threadID + 1] = 0;
	}
	// Up-sweep (reduce) phase: partial sums in place up the tree.
	int offset = 1;
	for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	// Zero the root so the down-sweep produces an EXCLUSIVE scan.
	if (threadID == 0) { temp[powerOfTwo - 1] = 0; } // clear the last element
	// Down-sweep phase: swap-and-accumulate back down the tree.
	for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	if (threadID < n) {
		output[2 * threadID] = temp[2 * threadID]; // write results to device memory
		output[2 * threadID + 1] = temp[2 * threadID + 1];
	}
}
// Per-block exclusive (Blelloch) scan with bank-conflict avoidance for a
// large array split into blocks of n elements. Block b scans its slice
// input[b*n .. b*n+n-1] into output and writes the slice's total into
// sums[b], which the host then scans and adds back via add() (see the host
// code above). Launched with n/2 threads per block; the tree walk assumes
// n is a power of two. The caller passes 2 * sharedMemArraySize bytes of
// dynamic shared memory to leave room for the padded indices.
__global__ void prescan_large(int *output, int *input, int n, int *sums) {
	extern __shared__ int temp[];
	int blockID = blockIdx.x;
	int threadID = threadIdx.x;
	int blockOffset = blockID * n;
	// Each thread loads one element from each half of this block's slice,
	// with padding offsets to dodge shared-memory bank conflicts.
	int ai = threadID;
	int bi = threadID + (n / 2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
	temp[ai + bankOffsetA] = input[blockOffset + ai];
	temp[bi + bankOffsetB] = input[blockOffset + bi];
	// Up-sweep (reduce): partial sums built in place up the tree.
	int offset = 1;
	for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			// Inner ai/bi intentionally shadow the load indices above.
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	if (threadID == 0) {
		// Record this block's total, then clear the root for the down-sweep.
		sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
		temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
	}
	// Down-sweep: convert the reduction tree into an exclusive scan.
	for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			ai += CONFLICT_FREE_OFFSET(ai);
			bi += CONFLICT_FREE_OFFSET(bi);
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	// Write the scanned slice back to global memory.
	output[blockOffset + ai] = temp[ai + bankOffsetA];
	output[blockOffset + bi] = temp[bi + bankOffsetB];
}
// Per-block exclusive scan WITHOUT bank-conflict padding; the baseline
// counterpart of prescan_large. Block b scans its n-element slice of input
// into output and stores the slice total in sums[b]. Launched with n/2
// threads per block; the tree walk assumes n is a power of two.
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums) {
	int blockID = blockIdx.x;
	int threadID = threadIdx.x;
	int blockOffset = blockID * n;
	extern __shared__ int temp[];
	// Each thread loads a consecutive pair from this block's slice.
	temp[2 * threadID] = input[blockOffset + (2 * threadID)];
	temp[2 * threadID + 1] = input[blockOffset + (2 * threadID) + 1];
	// Up-sweep (reduce): partial sums built in place up the tree.
	int offset = 1;
	for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	if (threadID == 0) {
		// Record this block's total, then clear the root for the down-sweep.
		sums[blockID] = temp[n - 1];
		temp[n - 1] = 0;
	}
	// Down-sweep: convert the reduction tree into an exclusive scan.
	for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	output[blockOffset + (2 * threadID)] = temp[2 * threadID];
	output[blockOffset + (2 * threadID) + 1] = temp[2 * threadID + 1];
}
// Adds n[b] (block b's scanned offset) to every element of block b in
// `output`. One thread per element; `length` elements per block.
__global__ void add(int *output, int length, int *n) {
	int idx = blockIdx.x * length + threadIdx.x;
	output[idx] += n[blockIdx.x];
}
// Overload used by the multi-level scan: adds both per-block increments
// (n1[b] and n2[b]) to every element of block b in `output`.
__global__ void add(int *output, int length, int *n1, int *n2) {
	int idx = blockIdx.x * length + threadIdx.x;
	output[idx] += n1[blockIdx.x] + n2[blockIdx.x];
}
/*///////////////////////////////////*/
/* utils.cpp */
/*///////////////////////////////////*/
// Reports a CUDA error to stderr and terminates the program.
// `message` is caller-supplied context, `err` the failing call's status,
// `caller` the name of the checking function.
void _checkCudaError(const char *message, cudaError_t err, const char *caller) {
	if (err != cudaSuccess) {
		fprintf(stderr, "Error in: %s\n", caller);
		// Print the message through "%s" so a '%' in the text cannot be
		// misread as a format specifier (the old code passed it as the
		// format string itself).
		fprintf(stderr, "%s", message);
		fprintf(stderr, ": %s\n", cudaGetErrorString(err));
		// The old exit(0) reported success to the shell on an error path;
		// use a failure status so scripts can detect the abort.
		exit(EXIT_FAILURE);
	}
}
// Prints a labelled scan result and its elapsed wall time in milliseconds.
// `nanoseconds` comes from get_nanos(); the old code divided by 1000
// (microseconds) while labelling the value "ms" -- divide by 1e6 so the
// label is honest and matches the float overload below.
void printResult(const char* prefix, int result, long nanoseconds) {
	printf(" ");
	// Print the caller-supplied label via "%s" so it is never treated as a
	// printf format string.
	printf("%s", prefix);
	printf(" : %i in %ld ms \n", result, nanoseconds / 1000000);
}
// Prints a labelled scan result and its elapsed time (already in ms).
void printResult(const char* prefix, int result, float milliseconds) {
	printf(" ");
	// Print the caller-supplied label via "%s" so it is never treated as a
	// printf format string.
	printf("%s", prefix);
	printf(" : %i in %f ms \n", result, milliseconds);
}
// from https://stackoverflow.com/a/3638454
// True iff x is a positive power of two: a power of two has exactly one bit
// set, so x & (x - 1) clears it to zero; non-positive values are rejected.
bool isPowerOfTwo(int x) {
	if (x <= 0) {
		return false;
	}
	return (x & (x - 1)) == 0;
}
// from https://stackoverflow.com/a/12506181
// Smallest power of two >= x (returns 1 for x <= 1).
int nextPowerOfTwo(int x) {
	int result = 1;
	while (result < x) {
		result <<= 1;
	}
	return result;
}
// from https://stackoverflow.com/a/36095407
// Current wall-clock (UTC) time in nanoseconds, via C11 timespec_get.
long get_nanos() {
	struct timespec now;
	timespec_get(&now, TIME_UTC);
	long nanos = (long)now.tv_sec * 1000000000L;
	return nanos + now.tv_nsec;
}
/*
Timings
'level 1' = blockscan
'l1 bcao' = blockscan with bcao
The number before the time is the final element of the scanned array
20000000 Elements
host : 89997032 in 42338 ms
gpu : 89997032 in 16.285631 ms
gpu bcao : 89997032 in 8.554880 ms
10000000 Elements
host : 44983528 in 20749 ms
gpu : 44983528 in 7.860768 ms
gpu bcao : 44983528 in 4.304064 ms
1000000 Elements
host : 4494474 in 2105 ms
gpu : 4494474 in 0.975648 ms
gpu bcao : 4494474 in 0.600416 ms
10000 Elements
host : 45078 in 19 ms
gpu : 45078 in 0.213760 ms
gpu bcao : 45078 in 0.192128 ms
5000 Elements
host : 22489 in 11 ms
gpu : 22489 in 0.169312 ms
gpu bcao : 22489 in 0.148832 ms
4096 Elements
host : 18294 in 9 ms
gpu : 18294 in 0.132672 ms
gpu bcao : 18294 in 0.128480 ms
2048 Elements
host : 9149 in 4 ms
gpu : 9149 in 0.140736 ms
gpu bcao : 9149 in 0.126944 ms
2000 Elements
host : 8958 in 3 ms
gpu : 8958 in 0.178912 ms
gpu bcao : 8958 in 0.214464 ms
1000 Elements
host : 4483 in 2 ms
gpu : 4483 in 0.020128 ms
gpu bcao : 4483 in 0.010784 ms
level 1 : 4483 in 0.018080 ms
l1 bcao : 4483 in 0.010400 ms
500 Elements
host : 2203 in 4 ms
gpu : 2203 in 0.013440 ms
gpu bcao : 2203 in 0.009664 ms
level 1 : 2203 in 0.013280 ms
l1 bcao : 2203 in 0.010176 ms
100 Elements
host : 356 in 0 ms
gpu : 356 in 0.008512 ms
gpu bcao : 356 in 0.009280 ms
level 1 : 356 in 0.008896 ms
l1 bcao : 356 in 0.009056 ms
64 Elements
host : 221 in 0 ms
gpu : 221 in 0.007584 ms
gpu bcao : 221 in 0.008960 ms
level 1 : 221 in 0.007360 ms
l1 bcao : 221 in 0.008352 ms
8 Elements
host : 24 in 0 ms
gpu : 24 in 0.006240 ms
gpu bcao : 24 in 0.007392 ms
level 1 : 24 in 0.006176 ms
l1 bcao : 24 in 0.007424 ms
5 Elements
host : 12 in 0 ms
gpu : 12 in 0.006144 ms
gpu bcao : 12 in 0.007296 ms
level 1 : 12 in 0.006048 ms
l1 bcao : 12 in 0.007328 ms
*/ |
7f3a51268a1110381eadd2d619e72d91f6f0c875.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
// #include <opencv2/opencv.hpp>
// #include <string>
// #include <iostream>
#include "caffe/layer.hpp"
#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
// using namespace cv;
// using namespace std;
namespace caffe
{
// Elementwise sign: +1 where in[i] > 0, otherwise -1 (zero maps to -1).
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (in[i] > 0) {
      out[i] = Dtype(1);
    } else {
      out[i] = Dtype(-1);
    }
  }
}
// TODO maybe change the way of detecting NaNs
// Writes a 0/1 mask: out[i] = 1 where in[i] is a real number, 0 where it is
// NaN (detected via the IEEE property that NaN != NaN).
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype *in, Dtype *out /*, const Dtype min_diff_*/)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const Dtype v = in[i];
    out[i] = (v == v) ? Dtype(1) : Dtype(0);
  }
}
// Writes a 0/1 mask: 1 where in[i] is non-zero, 0 where it is exactly zero.
template <typename Dtype>
__global__ void SetMask(const int n, const Dtype *in, Dtype *out /*, const Dtype min_diff_*/)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (in[i] != Dtype(0)) {
      out[i] = Dtype(1);
    } else {
      out[i] = Dtype(0);
    }
  }
}
// Copies in to out, replacing NaNs with zero (NaN != NaN test).
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const Dtype v = in[i];
    out[i] = (v == v) ? v : Dtype(0);
  }
}
// Zeroes out[i] wherever the mask in[i] is not set (<= 0.5); entries whose
// mask exceeds 0.5 are left untouched.
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (!(in[i] > Dtype(0.5))) {
      out[i] = Dtype(0);
    }
  }
}
// Like KillMasked, but the mask `in` holds only width_height entries and is
// reused for every channel: the mask index wraps modulo width_height, so one
// mask plane gates all channels of `out`.
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const int m = i % width_height;
    if (!(in[m] > Dtype(0.5))) {
      out[i] = Dtype(0);
    }
  }
}
// Zeroes out[i] wherever |in[i]| falls below `plateau`; all other entries of
// `out` keep their current value.
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype *in, Dtype *out, Dtype plateau)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const bool below = fabs(in[i]) < plateau;
    if (below) {
      out[i] = Dtype(0); // mask out plateau values, keep the rest as is
    }
  }
}
// Builds a 0/1 mask from scratch: 0 where |in[i]| < plateau, 1 elsewhere.
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype *in, Dtype *out, Dtype plateau)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (fabs(in[i]) < plateau) {
      out[i] = Dtype(0);
    } else {
      out[i] = Dtype(1);
    }
  }
}
// Forward pass of the L1 loss: loss = sum(|bottom[0] - bottom[1]|) /
// normalize_coeff_, with NaN masking, an optional external mask in
// bottom[2], optional plateau suppression, and an optional per-location L2
// mode (sqrt of the channel-wise sum of squares).
template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                     const vector<Blob<Dtype> *> &top)
{
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  // Collect the one or two input blobs that feed the internal difference layer.
  vector<Blob<Dtype> *> bottom_1_2;
  bottom_1_2.push_back(bottom[0]);
  if (bottom.size() > 1)
    bottom_1_2.push_back(bottom[1]);
  Dtype dot, loss;
  if (bottom_1_2.size() > 1)
  {
    // diffptr <- elementwise difference of the two inputs.
    diff_layer_->Forward(bottom_1_2, diff_top_vec_);
  }
  // if necessary, compute the number of not-NaNs
  int count = bottom[0]->count();
  int num = bottom[0]->num();
  // mask_ <- 1 where the difference is a real number, 0 where it is NaN.
  hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diffptr->gpu_data(), mask_.mutable_gpu_data() /*, Dtype(min_diff_)*/);
  //hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[1]->gpu_data(), mask_.mutable_gpu_data());
  /// DEBUG
  // Mat mask_image(bottom[0]->height(), bottom[0]->width(), CV_32FC1);
  // hipMemcpy(mask_image.data, mask_.mutable_gpu_data(), count*sizeof(Dtype), hipMemcpyDefault);
  // string filename;
  // cout << "Enter mask image name to save: ";
  // cin >> filename;
  // imwrite(filename.c_str(), mask_image*255);
  // LOG(INFO) << "Masks saved to " << filename;
  hipDeviceSynchronize();
  CUDA_POST_KERNEL_CHECK;
  if (bottom.size() == 3)
  {
    // Zero NaN entries of the difference, then replace mask_ with the
    // external mask derived from bottom[2] (1 where bottom[2] != 0).
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    hipDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    // NOTE: `count` is re-bound to the mask blob's size here and is used by
    // the normalization and plain-L1 kernels below.
    count = bottom[2]->count();
    hipLaunchKernelGGL(( SetMask<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[2]->gpu_data(), mask_.mutable_gpu_data());
    // DEBUG
    // Mat mask_image(bottom[2]->height(), bottom[2]->width(), CV_32FC1);
    // hipMemcpy(mask_image.data, mask_.mutable_gpu_data(), count * sizeof(Dtype), hipMemcpyDefault);
    // string filename = "1.jpg";
    // cout << "Enter mask image name to save: ";
    // namedWindow("mask");
    // imshow("mask", mask_image * 255);
    // LOG(INFO) << "Masks saved to " << filename;
  }
  if (this->layer_param_.l1_loss_param().normalize_by_num_entries())
  {
    // mask_ is 0/1, so its dot product with itself counts unmasked entries.
    caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
    normalize_coeff_ /= mask_.channels();
  }
  else
  {
    // Default normalization: divide by batch size.
    normalize_coeff_ = num;
  }
  if (this->layer_param_.l1_loss_param().l2_per_location())
  {
    // set masked (NaNs only) to zero
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    hipDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    // Per-location L2: square, sum across channels, then sqrt below.
    square_layer_->Forward(diff_top_vec_, square_top_vec_);
    sum_layer_->Forward(square_top_vec_, sum_top_vec_);
    // Mask plateau in summed blob (only one channel):
    if (this->layer_param_.l1_loss_param().plateau() > 0)
    {
      // Compare against plateau^2 since sum_output_ holds squared values.
      float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
      // plateau_l2_ <- 0 where the summed square is below plateau^2, else 1.
      hipLaunchKernelGGL(( MaskPlateauValuesInitial<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
      hipDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
      hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
      hipDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
    }
    sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
    // Note sign_ is set to all ones in Reshape
    // so this dot product is just the sum of the per-location norms.
    caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
  }
  else
  {
    // Mask plateau:
    if (this->layer_param_.l1_loss_param().plateau() > 0)
    {
      hipLaunchKernelGGL(( MaskPlateauValues<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
      CUDA_POST_KERNEL_CHECK;
    }
    //mask_.print("MASK2");
    // set masked (NaNs, plateau) to zero
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    // sign_ <- sign(diff); then dot(diff, sign) == sum(|diff|).
    hipLaunchKernelGGL(( ComputeSign<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, diffptr->gpu_data(), sign_.mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
  }
  loss = dot / normalize_coeff_;
  top[0]->mutable_cpu_data()[0] = loss;
}
// Backward pass: scales the upstream gradient by 1/normalize_coeff_, routes
// it through sign(diff) (plain L1) or back through the sqrt/sum/square chain
// (per-location L2 mode), zeroes gradients of masked entries, and lets the
// internal difference layer fill the bottom diffs.
template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                      const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom)
{
  // Run if any of the (one or two) inputs requests gradients.
  bool prop_down = propagate_down[0];
  vector<Blob<Dtype> *> bottom_1_2;
  bottom_1_2.push_back(bottom[0]);
  if (bottom.size() > 1)
    bottom_1_2.push_back(bottom[1]);
  if (bottom_1_2.size() > 1)
    prop_down |= propagate_down[1];
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  if (prop_down)
  {
    // Chain-rule scale: upstream gradient over the normalization used in
    // Forward_gpu.
    const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_;
    if (this->layer_param_.l1_loss_param().l2_per_location())
    {
      // NOTE: this local vector intentionally shadows the outer prop_down
      // flag; it tells the internal layers to propagate.
      vector<bool> prop_down(1, true);
      // Seed sqrt_output_'s diff with alpha (sign_ is all ones here).
      caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
                      Dtype(0), sqrt_output_.mutable_gpu_diff());
      sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
      if (this->layer_param_.l1_loss_param().plateau() > 0)
      {
        // Zero gradients at locations suppressed by the plateau mask.
        hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
        hipDeviceSynchronize();
        CUDA_POST_KERNEL_CHECK;
      }
      sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
      square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
    }
    else
    {
      // Plain L1: gradient w.r.t. the difference is alpha * sign(diff).
      caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
                      Dtype(0), diffptr->mutable_gpu_diff());
    }
    // Zero the gradient wherever Forward_gpu's mask was 0 (NaNs / external
    // mask / plateau).
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
    CUDA_POST_KERNEL_CHECK;
    if (bottom_1_2.size() > 1)
    {
      // Hand the gradient to the difference layer to produce bottom diffs.
      diff_layer_->Backward(diff_top_vec_, propagate_down, bottom_1_2);
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
| 7f3a51268a1110381eadd2d619e72d91f6f0c875.cu | #include <vector>
// #include <opencv2/opencv.hpp>
// #include <string>
// #include <iostream>
#include "caffe/layer.hpp"
#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
// using namespace cv;
// using namespace std;
namespace caffe
{
// Elementwise sign: +1 where in[i] > 0, otherwise -1 (zero maps to -1).
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (in[i] > 0) {
      out[i] = Dtype(1);
    } else {
      out[i] = Dtype(-1);
    }
  }
}
// TODO maybe change the way of detecting NaNs
// Writes a 0/1 mask: out[i] = 1 where in[i] is a real number, 0 where it is
// NaN (detected via the IEEE property that NaN != NaN).
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype *in, Dtype *out /*, const Dtype min_diff_*/)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const Dtype v = in[i];
    out[i] = (v == v) ? Dtype(1) : Dtype(0);
  }
}
// Writes a 0/1 mask: 1 where in[i] is non-zero, 0 where it is exactly zero.
template <typename Dtype>
__global__ void SetMask(const int n, const Dtype *in, Dtype *out /*, const Dtype min_diff_*/)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (in[i] != Dtype(0)) {
      out[i] = Dtype(1);
    } else {
      out[i] = Dtype(0);
    }
  }
}
// Copies in to out, replacing NaNs with zero (NaN != NaN test).
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const Dtype v = in[i];
    out[i] = (v == v) ? v : Dtype(0);
  }
}
// Zeroes out[i] wherever the mask in[i] is not set (<= 0.5); entries whose
// mask exceeds 0.5 are left untouched.
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (!(in[i] > Dtype(0.5))) {
      out[i] = Dtype(0);
    }
  }
}
// Like KillMasked, but the mask `in` holds only width_height entries and is
// reused for every channel: the mask index wraps modulo width_height, so one
// mask plane gates all channels of `out`.
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype *in, Dtype *out)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const int m = i % width_height;
    if (!(in[m] > Dtype(0.5))) {
      out[i] = Dtype(0);
    }
  }
}
// Zeroes out[i] wherever |in[i]| falls below `plateau`; all other entries of
// `out` keep their current value.
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype *in, Dtype *out, Dtype plateau)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    const bool below = fabs(in[i]) < plateau;
    if (below) {
      out[i] = Dtype(0); // mask out plateau values, keep the rest as is
    }
  }
}
// Builds a 0/1 mask from scratch: 0 where |in[i]| < plateau, 1 elsewhere.
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype *in, Dtype *out, Dtype plateau)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (fabs(in[i]) < plateau) {
      out[i] = Dtype(0);
    } else {
      out[i] = Dtype(1);
    }
  }
}
// Forward pass of the L1 loss: loss = sum(|bottom[0] - bottom[1]|) /
// normalize_coeff_, with NaN masking, an optional external mask in
// bottom[2], optional plateau suppression, and an optional per-location L2
// mode (sqrt of the channel-wise sum of squares).
template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                     const vector<Blob<Dtype> *> &top)
{
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  // Collect the one or two input blobs that feed the internal difference layer.
  vector<Blob<Dtype> *> bottom_1_2;
  bottom_1_2.push_back(bottom[0]);
  if (bottom.size() > 1)
    bottom_1_2.push_back(bottom[1]);
  Dtype dot, loss;
  if (bottom_1_2.size() > 1)
  {
    // diffptr <- elementwise difference of the two inputs.
    diff_layer_->Forward(bottom_1_2, diff_top_vec_);
  }
  // if necessary, compute the number of not-NaNs
  int count = bottom[0]->count();
  int num = bottom[0]->num();
  // mask_ <- 1 where the difference is a real number, 0 where it is NaN.
  FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, diffptr->gpu_data(), mask_.mutable_gpu_data() /*, Dtype(min_diff_)*/);
  // FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, bottom[1]->gpu_data(), mask_.mutable_gpu_data());
  /// DEBUG
  // Mat mask_image(bottom[0]->height(), bottom[0]->width(), CV_32FC1);
  // cudaMemcpy(mask_image.data, mask_.mutable_gpu_data(), count*sizeof(Dtype), cudaMemcpyDefault);
  // string filename;
  // cout << "Enter mask image name to save: ";
  // cin >> filename;
  // imwrite(filename.c_str(), mask_image*255);
  // LOG(INFO) << "Masks saved to " << filename;
  cudaDeviceSynchronize();
  CUDA_POST_KERNEL_CHECK;
  if (bottom.size() == 3)
  {
    // Zero NaN entries of the difference, then replace mask_ with the
    // external mask derived from bottom[2] (1 where bottom[2] != 0).
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    cudaDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    // NOTE: `count` is re-bound to the mask blob's size here and is used by
    // the normalization and plain-L1 kernels below.
    count = bottom[2]->count();
    SetMask<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, bottom[2]->gpu_data(), mask_.mutable_gpu_data());
    // DEBUG
    // Mat mask_image(bottom[2]->height(), bottom[2]->width(), CV_32FC1);
    // cudaMemcpy(mask_image.data, mask_.mutable_gpu_data(), count * sizeof(Dtype), cudaMemcpyDefault);
    // string filename = "1.jpg";
    // cout << "Enter mask image name to save: ";
    // namedWindow("mask");
    // imshow("mask", mask_image * 255);
    // LOG(INFO) << "Masks saved to " << filename;
  }
  if (this->layer_param_.l1_loss_param().normalize_by_num_entries())
  {
    // mask_ is 0/1, so its dot product with itself counts unmasked entries.
    caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
    normalize_coeff_ /= mask_.channels();
  }
  else
  {
    // Default normalization: divide by batch size.
    normalize_coeff_ = num;
  }
  if (this->layer_param_.l1_loss_param().l2_per_location())
  {
    // set masked (NaNs only) to zero
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    cudaDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    // Per-location L2: square, sum across channels, then sqrt below.
    square_layer_->Forward(diff_top_vec_, square_top_vec_);
    sum_layer_->Forward(square_top_vec_, sum_top_vec_);
    // Mask plateau in summed blob (only one channel):
    if (this->layer_param_.l1_loss_param().plateau() > 0)
    {
      // Compare against plateau^2 since sum_output_ holds squared values.
      float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
      // plateau_l2_ <- 0 where the summed square is below plateau^2, else 1.
      MaskPlateauValuesInitial<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
          sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
      cudaDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
      KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
          sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
      cudaDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
    }
    sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
    // Note sign_ is set to all ones in Reshape
    // so this dot product is just the sum of the per-location norms.
    caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
  }
  else
  {
    // Mask plateau:
    if (this->layer_param_.l1_loss_param().plateau() > 0)
    {
      MaskPlateauValues<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
      CUDA_POST_KERNEL_CHECK;
    }
    //mask_.print("MASK2");
    // set masked (NaNs, plateau) to zero
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    // sign_ <- sign(diff); then dot(diff, sign) == sum(|diff|).
    ComputeSign<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, diffptr->gpu_data(), sign_.mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
  }
  loss = dot / normalize_coeff_;
  top[0]->mutable_cpu_data()[0] = loss;
}
// Backward pass: scales the upstream gradient by 1/normalize_coeff_, routes
// it through sign(diff) (plain L1) or back through the sqrt/sum/square chain
// (per-location L2 mode), zeroes gradients of masked entries, and lets the
// internal difference layer fill the bottom diffs.
template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                      const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom)
{
  // Run if any of the (one or two) inputs requests gradients.
  bool prop_down = propagate_down[0];
  vector<Blob<Dtype> *> bottom_1_2;
  bottom_1_2.push_back(bottom[0]);
  if (bottom.size() > 1)
    bottom_1_2.push_back(bottom[1]);
  if (bottom_1_2.size() > 1)
    prop_down |= propagate_down[1];
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  if (prop_down)
  {
    // Chain-rule scale: upstream gradient over the normalization used in
    // Forward_gpu.
    const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_;
    if (this->layer_param_.l1_loss_param().l2_per_location())
    {
      // NOTE: this local vector intentionally shadows the outer prop_down
      // flag; it tells the internal layers to propagate.
      vector<bool> prop_down(1, true);
      // Seed sqrt_output_'s diff with alpha (sign_ is all ones here).
      caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
                      Dtype(0), sqrt_output_.mutable_gpu_diff());
      sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
      if (this->layer_param_.l1_loss_param().plateau() > 0)
      {
        // Zero gradients at locations suppressed by the plateau mask.
        KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
            sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
        cudaDeviceSynchronize();
        CUDA_POST_KERNEL_CHECK;
      }
      sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
      square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
    }
    else
    {
      // Plain L1: gradient w.r.t. the difference is alpha * sign(diff).
      caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
                      Dtype(0), diffptr->mutable_gpu_diff());
    }
    // Zero the gradient wherever Forward_gpu's mask was 0 (NaNs / external
    // mask / plateau).
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>(
        diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
    CUDA_POST_KERNEL_CHECK;
    if (bottom_1_2.size() > 1)
    {
      // Hand the gradient to the difference layer to produce bottom diffs.
      diff_layer_->Backward(diff_top_vec_, propagate_down, bottom_1_2);
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
|
2a5b4d6754fb19a14ede9efdad66e9cd2fca2a00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This example introduces CUDA's abstraction of data parallel computational
// "kernels", or __global__ functions. A __global__ function acts like the
// main() function of a GPU program, and is allowed to manipulate device
// memory directly.
#include <stdlib.h>
#include <stdio.h>
// "kernels" or __global__ functions are the entry points to code that executes on the GPU
// The keyword __global__ indicates to the compiler that this function is a GPU entry point.
// __global__ functions must return void, and may only be called or "launched" from code that
// executes on the CPU.
// GPU entry point: writes the constant 7 into the element of `array` owned
// by this thread. Assumes a 1-D launch whose grid * block size covers the
// array exactly (there is no bounds check).
__global__ void kernel(int *array)
{
  // Flatten (block, thread) coordinates into one global element index.
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  array[i] = 7;
}
// Host driver: allocates a 256-int array on host and device, launches the
// kernel to fill the device array with 7s, copies the result back, prints
// it, and frees both buffers. Returns non-zero on allocation/copy failure.
int main(void)
{
  // create arrays of 256 elements
  int num_elements = 256;
  // compute the size of the arrays in bytes
  int num_bytes = num_elements * sizeof(int);
  // pointers to host & device arrays
  int *device_array = 0;
  int *host_array = 0;
  // malloc a host array
  host_array = (int*)malloc(num_bytes);
  // hipMalloc a device array
  hipMalloc((void**)&device_array, num_bytes);
  // if either memory allocation failed, release whichever one succeeded
  // (the original leaked it) and report an error
  if(host_array == 0 || device_array == 0)
  {
    printf("couldn't allocate memory\n");
    free(host_array);
    hipFree(device_array);
    return 1;
  }
  // choose a number of threads per block; 128 threads (4 warps) tends to be
  // a good number
  int block_size = 128;
  // num_elements is a multiple of block_size, so integer division is exact
  int grid_size = num_elements / block_size;
  // launch grid_size blocks of block_size threads
  hipLaunchKernelGGL(( kernel), dim3(grid_size),dim3(block_size), 0, 0, device_array);
  // download the result; the synchronous copy also surfaces kernel launch
  // or execution failures, so check its status instead of ignoring it
  if (hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost) != hipSuccess)
  {
    printf("couldn't copy result back to host\n");
    free(host_array);
    hipFree(device_array);
    return 1;
  }
  // print out the result element by element
  for(int i=0; i < num_elements; ++i)
  {
    printf("%d ", host_array[i]);
  }
  printf("\n");
  // deallocate memory
  free(host_array);
  hipFree(device_array);
  return 0;
}
| 2a5b4d6754fb19a14ede9efdad66e9cd2fca2a00.cu | // This example introduces CUDA's abstraction of data parallel computational
// "kernels", or __global__ functions. A __global__ function acts like the
// main() function of a GPU program, and is allowed to manipulate device
// memory directly.
#include <stdlib.h>
#include <stdio.h>
// "kernels" or __global__ functions are the entry points to code that executes on the GPU
// The keyword __global__ indicates to the compiler that this function is a GPU entry point.
// __global__ functions must return void, and may only be called or "launched" from code that
// executes on the CPU.
// GPU entry point: writes the constant 7 into the element of `array` owned
// by this thread. Assumes a 1-D launch whose grid * block size covers the
// array exactly (there is no bounds check).
__global__ void kernel(int *array)
{
  // Flatten (block, thread) coordinates into one global element index.
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  array[i] = 7;
}
// Host driver: allocates a 256-int array on host and device, launches the
// kernel to fill the device array with 7s, copies the result back, prints
// it, and frees both buffers. Returns non-zero on allocation/copy failure.
int main(void)
{
  // create arrays of 256 elements
  int num_elements = 256;
  // compute the size of the arrays in bytes
  int num_bytes = num_elements * sizeof(int);
  // pointers to host & device arrays
  int *device_array = 0;
  int *host_array = 0;
  // malloc a host array
  host_array = (int*)malloc(num_bytes);
  // cudaMalloc a device array
  cudaMalloc((void**)&device_array, num_bytes);
  // if either memory allocation failed, release whichever one succeeded
  // (the original leaked it) and report an error
  if(host_array == 0 || device_array == 0)
  {
    printf("couldn't allocate memory\n");
    free(host_array);
    cudaFree(device_array);
    return 1;
  }
  // choose a number of threads per block; 128 threads (4 warps) tends to be
  // a good number
  int block_size = 128;
  // num_elements is a multiple of block_size, so integer division is exact
  int grid_size = num_elements / block_size;
  // launch grid_size blocks of block_size threads
  kernel<<<grid_size,block_size>>>(device_array);
  // download the result; the synchronous copy also surfaces kernel launch
  // or execution failures, so check its status instead of ignoring it
  if (cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost) != cudaSuccess)
  {
    printf("couldn't copy result back to host\n");
    free(host_array);
    cudaFree(device_array);
    return 1;
  }
  // print out the result element by element
  for(int i=0; i < num_elements; ++i)
  {
    printf("%d ", host_array[i]);
  }
  printf("\n");
  // deallocate memory
  free(host_array);
  cudaFree(device_array);
  return 0;
}
f512a6427fae625b0052ec862f0db1817158f71b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2019) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#include <experimental/mdspan>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include "sum_3d_common.hpp"
#include "fill.hpp"
//================================================================================
static constexpr int warpsPerBlock = 4;
//================================================================================
template <class T, size_t... Es>
using lmdspan = stdex::mdspan<T, stdex::extents<Es...>, stdex::layout_left>;
template <class T, size_t... Es>
using rmdspan = stdex::mdspan<T, stdex::extents<Es...>, stdex::layout_right>;
//================================================================================
template <class Tp>
MDSPAN_FORCE_INLINE_FUNCTION inline
void DoNotOptimize(Tp const& value) {
// Can't have m constraints on device
asm volatile("" : : "r"(value) : "memory");
}
template <class Tp>
MDSPAN_FORCE_INLINE_FUNCTION inline
void DoNotOptimize(Tp& value) {
// Can't have m constraints on device
asm volatile("" : "+r"(value) : : "memory");
}
//================================================================================
void throw_runtime_exception(const std::string &msg) {
std::ostringstream o;
o << msg;
throw std::runtime_error(o.str());
}
void cuda_internal_error_throw(hipError_t e, const char* name,
const char* file = NULL, const int line = 0) {
std::ostringstream out;
out << name << " error( " << hipGetErrorName(e)
<< "): " << hipGetErrorString(e);
if (file) {
out << " " << file << ":" << line;
}
throw_runtime_exception(out.str());
}
inline void cuda_internal_safe_call(hipError_t e, const char* name,
const char* file = NULL,
const int line = 0) {
if (hipSuccess != e) {
cuda_internal_error_throw(e, name, file, line);
}
}
#define CUDA_SAFE_CALL(call) \
cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
//================================================================================
dim3 get_bench_grid() {
hipDeviceProp_t cudaProp;
CUDA_SAFE_CALL(hipGetDeviceProperties(&cudaProp, 0));
return dim3(cudaProp.multiProcessorCount, 1, 1);
}
dim3 get_bench_thread_block() {
hipDeviceProp_t cudaProp;
CUDA_SAFE_CALL(hipGetDeviceProperties(&cudaProp, 1));
return dim3(1, cudaProp.warpSize, warpsPerBlock);
}
template <class F, class... Args>
__global__
void do_run_kernel(F f, Args... args) {
f(args...);
}
template <class F, class... Args>
float run_kernel_timed(F&& f, Args&&... args) {
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
CUDA_SAFE_CALL(hipEventRecord(start));
hipLaunchKernelGGL(( do_run_kernel), dim3(get_bench_grid()), dim3(get_bench_thread_block()), 0, 0,
(F&&)f, ((Args&&) args)...
);
CUDA_SAFE_CALL(hipEventRecord(stop));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float milliseconds = 0;
CUDA_SAFE_CALL(hipEventElapsedTime(&milliseconds, start, stop));
return milliseconds;
}
template <class MDSpan, class... DynSizes>
MDSpan fill_device_mdspan(MDSpan, DynSizes... dyn) {
using value_type = typename MDSpan::value_type;
auto buffer_size = MDSpan{nullptr, dyn...}.mapping().required_span_size();
auto host_buffer = std::make_unique<value_type[]>(
MDSpan{nullptr, dyn...}.mapping().required_span_size()
);
auto host_mdspan = MDSpan{host_buffer.get(), dyn...};
mdspan_benchmark::fill_random(host_mdspan);
value_type* device_buffer = nullptr;
CUDA_SAFE_CALL(hipMalloc(&device_buffer, buffer_size * sizeof(value_type)));
CUDA_SAFE_CALL(hipMemcpy(
device_buffer, host_buffer.get(), buffer_size * sizeof(value_type), hipMemcpyHostToDevice
));
return MDSpan{device_buffer, dyn...};
}
//================================================================================
template <class MDSpan, class... DynSizes>
void BM_MDSpan_Cuda_Sum_3D(benchmark::State& state, MDSpan, DynSizes... dyn) {
using value_type = typename MDSpan::value_type;
auto s = fill_device_mdspan(MDSpan{}, dyn...);
int repeats = s.size() > (100*100*100) ? 50 : 1000;
for (auto _ : state) {
auto timed = run_kernel_timed(
[=] __device__ {
for(int r = 0; r < repeats; ++r) {
value_type sum_local = 0;
for(size_t i = blockIdx.x; i < s.extent(0); i += gridDim.x) {
for(size_t j = threadIdx.z; j < s.extent(1); j += blockDim.z) {
for(size_t k = threadIdx.y; k < s.extent(2); k += blockDim.y) {
sum_local += s(i, j, k);
}
}
}
DoNotOptimize(*(volatile value_type*)(&s(0,0,0)) = sum_local);
asm volatile ("": : :"memory");
}
}
);
// units of cuda timer is milliseconds, units of iteration timer is seconds
state.SetIterationTime(timed * 1e-3);
}
state.SetBytesProcessed(s.size() * sizeof(value_type) * state.iterations() * repeats);
state.counters["repeats"] = repeats;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipFree(s.data()));
}
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, right_, rmdspan, 80, 80, 80);
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, left_, lmdspan, 80, 80, 80);
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, right_, rmdspan, 400, 400, 400);
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, left_, lmdspan, 400, 400, 400);
//================================================================================
template <class T, class SizeX, class SizeY, class SizeZ>
void BM_Raw_Cuda_Sum_3D_right(benchmark::State& state, T, SizeX x, SizeY y, SizeZ z) {
using value_type = T;
value_type* data = nullptr;
{
// just for setup...
auto wrapped = stdex::mdspan<T, stdex::dextents<1>>{};
auto s = fill_device_mdspan(wrapped, x*y*z);
data = s.data();
}
int repeats = x*y*z > (100*100*100) ? 50 : 1000;
for (auto _ : state) {
auto timed = run_kernel_timed(
[=] __device__ {
for(int r = 0; r < repeats; ++r) {
value_type sum_local = 0;
for(size_t i = blockIdx.x; i < x; i += gridDim.x) {
for(size_t j = threadIdx.z; j < y; j += blockDim.z) {
for(size_t k = threadIdx.y; k < z; k += blockDim.y) {
sum_local += data[k + j*z + i*z*y];
}
}
}
DoNotOptimize(*(volatile value_type*)(&data[0]) = sum_local);
asm volatile ("": : :"memory");
}
}
);
// units of cuda timer is milliseconds, units of iteration timer is seconds
state.SetIterationTime(timed * 1e-3);
}
state.SetBytesProcessed(x * y * z * sizeof(value_type) * state.iterations() * repeats);
state.counters["repeats"] = repeats;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipFree(data));
}
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_right, size_80_80_80, int(), 80, 80, 80);
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_right, size_400_400_400, int(), 400, 400, 400);
//================================================================================
template <class T, class SizeX, class SizeY, class SizeZ>
void BM_Raw_Cuda_Sum_3D_left(benchmark::State& state, T, SizeX x, SizeY y, SizeZ z) {
using value_type = T;
value_type* data = nullptr;
{
// just for setup...
auto wrapped = stdex::mdspan<T, stdex::dextents<1>>{};
auto s = fill_device_mdspan(wrapped, x*y*z);
data = s.data();
}
int repeats = x*y*z > (100*100*100) ? 50 : 1000;
for (auto _ : state) {
auto timed = run_kernel_timed(
[=] __device__ {
for(int r = 0; r < repeats; ++r) {
value_type sum_local = 0;
for(size_t i = blockIdx.x; i < x; i += gridDim.x) {
for(size_t j = threadIdx.z; j < y; j += blockDim.z) {
for(size_t k = threadIdx.y; k < z; k += blockDim.y) {
sum_local += data[k*x*y + j*x + i];
}
}
}
DoNotOptimize(*(volatile value_type*)(&data[0]) = sum_local);
asm volatile ("": : :"memory");
}
}
);
// units of cuda timer is milliseconds, units of iteration timer is seconds
state.SetIterationTime(timed * 1e-3);
}
state.SetBytesProcessed(x * y * z * sizeof(value_type) * state.iterations() * repeats);
state.counters["repeats"] = repeats;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipFree(data));
}
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_left, size_80_80_80, int(), 80, 80, 80);
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_left, size_400_400_400, int(), 400, 400, 400);
//================================================================================
BENCHMARK_MAIN();
| f512a6427fae625b0052ec862f0db1817158f71b.cu | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 2.0
// Copyright (2019) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
//
// ************************************************************************
//@HEADER
*/
#include <experimental/mdspan>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include "sum_3d_common.hpp"
#include "fill.hpp"
//================================================================================
static constexpr int warpsPerBlock = 4;
//================================================================================
template <class T, size_t... Es>
using lmdspan = stdex::mdspan<T, stdex::extents<Es...>, stdex::layout_left>;
template <class T, size_t... Es>
using rmdspan = stdex::mdspan<T, stdex::extents<Es...>, stdex::layout_right>;
//================================================================================
template <class Tp>
MDSPAN_FORCE_INLINE_FUNCTION inline
void DoNotOptimize(Tp const& value) {
// Can't have m constraints on device
asm volatile("" : : "r"(value) : "memory");
}
template <class Tp>
MDSPAN_FORCE_INLINE_FUNCTION inline
void DoNotOptimize(Tp& value) {
// Can't have m constraints on device
asm volatile("" : "+r"(value) : : "memory");
}
//================================================================================
void throw_runtime_exception(const std::string &msg) {
std::ostringstream o;
o << msg;
throw std::runtime_error(o.str());
}
void cuda_internal_error_throw(cudaError e, const char* name,
const char* file = NULL, const int line = 0) {
std::ostringstream out;
out << name << " error( " << cudaGetErrorName(e)
<< "): " << cudaGetErrorString(e);
if (file) {
out << " " << file << ":" << line;
}
throw_runtime_exception(out.str());
}
inline void cuda_internal_safe_call(cudaError e, const char* name,
const char* file = NULL,
const int line = 0) {
if (cudaSuccess != e) {
cuda_internal_error_throw(e, name, file, line);
}
}
#define CUDA_SAFE_CALL(call) \
cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
//================================================================================
dim3 get_bench_grid() {
cudaDeviceProp cudaProp;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&cudaProp, 0));
return dim3(cudaProp.multiProcessorCount, 1, 1);
}
dim3 get_bench_thread_block() {
cudaDeviceProp cudaProp;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&cudaProp, 1));
return dim3(1, cudaProp.warpSize, warpsPerBlock);
}
template <class F, class... Args>
__global__
void do_run_kernel(F f, Args... args) {
f(args...);
}
template <class F, class... Args>
float run_kernel_timed(F&& f, Args&&... args) {
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
CUDA_SAFE_CALL(cudaEventRecord(start));
do_run_kernel<<<get_bench_grid(), get_bench_thread_block()>>>(
(F&&)f, ((Args&&) args)...
);
CUDA_SAFE_CALL(cudaEventRecord(stop));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float milliseconds = 0;
CUDA_SAFE_CALL(cudaEventElapsedTime(&milliseconds, start, stop));
return milliseconds;
}
template <class MDSpan, class... DynSizes>
MDSpan fill_device_mdspan(MDSpan, DynSizes... dyn) {
using value_type = typename MDSpan::value_type;
auto buffer_size = MDSpan{nullptr, dyn...}.mapping().required_span_size();
auto host_buffer = std::make_unique<value_type[]>(
MDSpan{nullptr, dyn...}.mapping().required_span_size()
);
auto host_mdspan = MDSpan{host_buffer.get(), dyn...};
mdspan_benchmark::fill_random(host_mdspan);
value_type* device_buffer = nullptr;
CUDA_SAFE_CALL(cudaMalloc(&device_buffer, buffer_size * sizeof(value_type)));
CUDA_SAFE_CALL(cudaMemcpy(
device_buffer, host_buffer.get(), buffer_size * sizeof(value_type), cudaMemcpyHostToDevice
));
return MDSpan{device_buffer, dyn...};
}
//================================================================================
template <class MDSpan, class... DynSizes>
void BM_MDSpan_Cuda_Sum_3D(benchmark::State& state, MDSpan, DynSizes... dyn) {
using value_type = typename MDSpan::value_type;
auto s = fill_device_mdspan(MDSpan{}, dyn...);
int repeats = s.size() > (100*100*100) ? 50 : 1000;
for (auto _ : state) {
auto timed = run_kernel_timed(
[=] __device__ {
for(int r = 0; r < repeats; ++r) {
value_type sum_local = 0;
for(size_t i = blockIdx.x; i < s.extent(0); i += gridDim.x) {
for(size_t j = threadIdx.z; j < s.extent(1); j += blockDim.z) {
for(size_t k = threadIdx.y; k < s.extent(2); k += blockDim.y) {
sum_local += s(i, j, k);
}
}
}
DoNotOptimize(*(volatile value_type*)(&s(0,0,0)) = sum_local);
asm volatile ("": : :"memory");
}
}
);
// units of cuda timer is milliseconds, units of iteration timer is seconds
state.SetIterationTime(timed * 1e-3);
}
state.SetBytesProcessed(s.size() * sizeof(value_type) * state.iterations() * repeats);
state.counters["repeats"] = repeats;
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaFree(s.data()));
}
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, right_, rmdspan, 80, 80, 80);
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, left_, lmdspan, 80, 80, 80);
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, right_, rmdspan, 400, 400, 400);
MDSPAN_BENCHMARK_ALL_3D_MANUAL(BM_MDSpan_Cuda_Sum_3D, left_, lmdspan, 400, 400, 400);
//================================================================================
template <class T, class SizeX, class SizeY, class SizeZ>
void BM_Raw_Cuda_Sum_3D_right(benchmark::State& state, T, SizeX x, SizeY y, SizeZ z) {
using value_type = T;
value_type* data = nullptr;
{
// just for setup...
auto wrapped = stdex::mdspan<T, stdex::dextents<1>>{};
auto s = fill_device_mdspan(wrapped, x*y*z);
data = s.data();
}
int repeats = x*y*z > (100*100*100) ? 50 : 1000;
for (auto _ : state) {
auto timed = run_kernel_timed(
[=] __device__ {
for(int r = 0; r < repeats; ++r) {
value_type sum_local = 0;
for(size_t i = blockIdx.x; i < x; i += gridDim.x) {
for(size_t j = threadIdx.z; j < y; j += blockDim.z) {
for(size_t k = threadIdx.y; k < z; k += blockDim.y) {
sum_local += data[k + j*z + i*z*y];
}
}
}
DoNotOptimize(*(volatile value_type*)(&data[0]) = sum_local);
asm volatile ("": : :"memory");
}
}
);
// units of cuda timer is milliseconds, units of iteration timer is seconds
state.SetIterationTime(timed * 1e-3);
}
state.SetBytesProcessed(x * y * z * sizeof(value_type) * state.iterations() * repeats);
state.counters["repeats"] = repeats;
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaFree(data));
}
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_right, size_80_80_80, int(), 80, 80, 80);
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_right, size_400_400_400, int(), 400, 400, 400);
//================================================================================
template <class T, class SizeX, class SizeY, class SizeZ>
void BM_Raw_Cuda_Sum_3D_left(benchmark::State& state, T, SizeX x, SizeY y, SizeZ z) {
using value_type = T;
value_type* data = nullptr;
{
// just for setup...
auto wrapped = stdex::mdspan<T, stdex::dextents<1>>{};
auto s = fill_device_mdspan(wrapped, x*y*z);
data = s.data();
}
int repeats = x*y*z > (100*100*100) ? 50 : 1000;
for (auto _ : state) {
auto timed = run_kernel_timed(
[=] __device__ {
for(int r = 0; r < repeats; ++r) {
value_type sum_local = 0;
for(size_t i = blockIdx.x; i < x; i += gridDim.x) {
for(size_t j = threadIdx.z; j < y; j += blockDim.z) {
for(size_t k = threadIdx.y; k < z; k += blockDim.y) {
sum_local += data[k*x*y + j*x + i];
}
}
}
DoNotOptimize(*(volatile value_type*)(&data[0]) = sum_local);
asm volatile ("": : :"memory");
}
}
);
// units of cuda timer is milliseconds, units of iteration timer is seconds
state.SetIterationTime(timed * 1e-3);
}
state.SetBytesProcessed(x * y * z * sizeof(value_type) * state.iterations() * repeats);
state.counters["repeats"] = repeats;
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaFree(data));
}
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_left, size_80_80_80, int(), 80, 80, 80);
BENCHMARK_CAPTURE(BM_Raw_Cuda_Sum_3D_left, size_400_400_400, int(), 400, 400, 400);
//================================================================================
BENCHMARK_MAIN();
|
ffabe832e458165f9e51c000df6d49828f9004f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///////////////////////////////////////////////////////////////////////////////
//
// The MIT License
//
// Copyright (c) 2006 Scientific Computing and Imaging Institute,
// University of Utah (USA)
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef ELVIS_VOLUME_RENDERING_SINGLE_RAY_PER_SEGMENT_CU
#define ELVIS_VOLUME_RENDERING_SINGLE_RAY_PER_SEGMENT_CU
#include <ElVis/Core/Float.cu>
#include <ElVis/Core/FieldEvaluator.cu>
#include <ElVis/Math/TrapezoidalIntegration.hpp>
#include <ElVis/Core/TransferFunction.h>
#include <math_functions.h>
namespace ElVis
{
extern "C" __global__ void IntegrateSegmentSingleThreadPerRayRiemann(
ElVisFloat3 origin,
const int* __restrict__ segmentElementId,
const int* __restrict__ segmentElementType,
const ElVisFloat3* __restrict__ segmentDirection,
const ElVisFloat* __restrict__ segmentStart,
const ElVisFloat* __restrict__ segmentEnd,
int fieldId,
TransferFunction* transferFunction,
ElVisFloat epsilon,
ElVisFloat desiredH,
uint screenx,
uint screeny,
bool enableTrace,
int tracex,
int tracey,
int* numSamples,
ElVisFloat* __restrict__ densityAccumulator,
ElVisFloat3* __restrict__ colorAccumulator)
{
int2 trace = make_int2(tracex, tracey);
uint2 pixel;
pixel.x = blockIdx.x * blockDim.x + threadIdx.x;
pixel.y = blockIdx.y * blockDim.y + threadIdx.y;
bool traceEnabled =
(pixel.x == trace.x && pixel.y == trace.y && enableTrace);
uint2 screen = make_uint2(screenx, screeny);
if (pixel.x >= screen.x || pixel.y >= screen.y)
{
return;
}
int segmentIndex = pixel.x + screen.x * pixel.y;
if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0))
{
return;
}
int elementId = segmentElementId[segmentIndex];
if (elementId == -1)
{
return;
}
int elementTypeId = segmentElementType[segmentIndex];
ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex];
ElVisFloat3 color = colorAccumulator[segmentIndex];
ElVisFloat a = segmentStart[segmentIndex];
ElVisFloat b = segmentEnd[segmentIndex];
ElVisFloat3 dir = segmentDirection[segmentIndex];
ElVisFloat d = (b - a);
if (d == MAKE_FLOAT(0.0))
{
return;
}
int n = Floor(d / desiredH);
ElVisFloat h;
if (n <= 1)
{
h = b - a;
n = 1;
}
else
{
h = d / (ElVisFloat)(n - 1);
}
if (traceEnabled)
{
// ELVIS_PRINTF("Total segment range: [%2.15f, %2.15f], segment Id %d\n",
// segmentStart[segmentIndex], segmentEnd[segmentIndex], segmentIndex);
// ELVIS_PRINTF("D = %2.15f, H = %2.15f, N = %d\n", d, h, n);
}
// First test for density identically 0. This means the segment does not
// contribute at
// all to the integral and can be skipped.
FieldEvaluator f;
f.Origin = origin;
f.Direction = dir;
f.ElementId = elementId;
f.ElementType = elementTypeId;
f.sampleCount = numSamples;
f.FieldId = fieldId;
ElVisFloat s0 = f(a);
ElVisFloat d0 = transferFunction->Sample(eDensity, s0);
ElVisFloat3 color0 = transferFunction->SampleColor(s0);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * color0 * d0 * atten;
accumulatedDensity += d0 * h;
for (int i = 1; i < n; ++i)
{
ElVisFloat t = a + i * h;
ElVisFloat sample = f(t);
ElVisFloat densityValue = transferFunction->Sample(eDensity, sample);
ElVisFloat3 sampleColor = transferFunction->SampleColor(sample);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * sampleColor * densityValue * atten;
accumulatedDensity += densityValue * h;
}
densityAccumulator[segmentIndex] = accumulatedDensity;
colorAccumulator[segmentIndex] = color;
}
extern "C" __global__ void Trapezoidal_SingleThreadPerRay(
ElVisFloat3 origin,
const int* __restrict__ segmentElementId,
const int* __restrict__ segmentElementType,
const ElVisFloat3* __restrict__ segmentDirection,
const ElVisFloat* __restrict__ segmentStart,
const ElVisFloat* __restrict__ segmentEnd,
int fieldId,
TransferFunction* transferFunction,
ElVisFloat epsilon,
ElVisFloat desiredH,
uint screenx,
uint screeny,
bool enableTrace,
int tracex,
int tracey,
int* numSamples,
ElVisFloat* __restrict__ densityAccumulator,
ElVisFloat3* __restrict__ colorAccumulator)
{
int2 trace = make_int2(tracex, tracey);
uint2 pixel;
pixel.x = blockIdx.x * blockDim.x + threadIdx.x;
pixel.y = blockIdx.y * blockDim.y + threadIdx.y;
bool traceEnabled =
(pixel.x == trace.x && pixel.y == trace.y && enableTrace);
uint2 screen = make_uint2(screenx, screeny);
if (pixel.x >= screen.x || pixel.y >= screen.y)
{
return;
}
int segmentIndex = pixel.x + screen.x * pixel.y;
if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0))
{
return;
}
int elementId = segmentElementId[segmentIndex];
if (elementId == -1)
{
return;
}
int elementTypeId = segmentElementType[segmentIndex];
if (traceEnabled)
{
// ELVIS_PRINTF("Trapezoidal_SingleThreadPerRay: Processing segment id
// %d\n", segmentIndex);
}
ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex];
ElVisFloat3 color = colorAccumulator[segmentIndex];
ElVisFloat a = segmentStart[segmentIndex];
ElVisFloat b = segmentEnd[segmentIndex];
ElVisFloat3 dir = segmentDirection[segmentIndex];
ElVisFloat d = (b - a);
if (d == MAKE_FLOAT(0.0))
{
return;
}
int n = Floor(d / desiredH);
ElVisFloat h;
if (n == 0)
{
h = b - a;
n = 1;
}
else
{
h = d / (ElVisFloat)(n);
}
// First test for density identically 0. This means the segment does not
// contribute at
// all to the integral and can be skipped.
FieldEvaluator f;
f.Origin = origin;
f.Direction = dir;
f.ElementId = elementId;
f.ElementType = elementTypeId;
f.sampleCount = numSamples;
f.FieldId = fieldId;
ElVisFloat s0 = f(a);
ElVisFloat3 color0 = transferFunction->SampleColor(s0);
ElVisFloat d0 = transferFunction->Sample(eDensity, s0);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * MAKE_FLOAT(.5) * color0 * d0 * atten;
for (int i = 1; i < n; ++i)
{
ElVisFloat t = a + i * h;
ElVisFloat sample = f(t);
ElVisFloat d1 = transferFunction->Sample(eDensity, sample);
accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + d1);
ElVisFloat3 colorSample = transferFunction->SampleColor(sample);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * colorSample * d1 * atten;
d0 = d1;
}
ElVisFloat sn = f(b);
ElVisFloat3 colorn = transferFunction->SampleColor(sn);
ElVisFloat dn = transferFunction->Sample(eDensity, sn);
accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + dn);
atten = expf(-accumulatedDensity);
color += h * MAKE_FLOAT(.5) * colorn * dn * atten;
densityAccumulator[segmentIndex] = accumulatedDensity;
colorAccumulator[segmentIndex] = color;
}
}
#endif // ELVIS_VOLUME_RENDERING_SINGLE_RAY_PER_SEGMENT_CU
| ffabe832e458165f9e51c000df6d49828f9004f8.cu | ///////////////////////////////////////////////////////////////////////////////
//
// The MIT License
//
// Copyright (c) 2006 Scientific Computing and Imaging Institute,
// University of Utah (USA)
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef ELVIS_VOLUME_RENDERING_SINGLE_RAY_PER_SEGMENT_CU
#define ELVIS_VOLUME_RENDERING_SINGLE_RAY_PER_SEGMENT_CU
#include <ElVis/Core/Float.cu>
#include <ElVis/Core/FieldEvaluator.cu>
#include <ElVis/Math/TrapezoidalIntegration.hpp>
#include <ElVis/Core/TransferFunction.h>
#include <math_functions.h>
namespace ElVis
{
extern "C" __global__ void IntegrateSegmentSingleThreadPerRayRiemann(
ElVisFloat3 origin,
const int* __restrict__ segmentElementId,
const int* __restrict__ segmentElementType,
const ElVisFloat3* __restrict__ segmentDirection,
const ElVisFloat* __restrict__ segmentStart,
const ElVisFloat* __restrict__ segmentEnd,
int fieldId,
TransferFunction* transferFunction,
ElVisFloat epsilon,
ElVisFloat desiredH,
uint screenx,
uint screeny,
bool enableTrace,
int tracex,
int tracey,
int* numSamples,
ElVisFloat* __restrict__ densityAccumulator,
ElVisFloat3* __restrict__ colorAccumulator)
{
int2 trace = make_int2(tracex, tracey);
uint2 pixel;
pixel.x = blockIdx.x * blockDim.x + threadIdx.x;
pixel.y = blockIdx.y * blockDim.y + threadIdx.y;
bool traceEnabled =
(pixel.x == trace.x && pixel.y == trace.y && enableTrace);
uint2 screen = make_uint2(screenx, screeny);
if (pixel.x >= screen.x || pixel.y >= screen.y)
{
return;
}
int segmentIndex = pixel.x + screen.x * pixel.y;
if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0))
{
return;
}
int elementId = segmentElementId[segmentIndex];
if (elementId == -1)
{
return;
}
int elementTypeId = segmentElementType[segmentIndex];
ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex];
ElVisFloat3 color = colorAccumulator[segmentIndex];
ElVisFloat a = segmentStart[segmentIndex];
ElVisFloat b = segmentEnd[segmentIndex];
ElVisFloat3 dir = segmentDirection[segmentIndex];
ElVisFloat d = (b - a);
if (d == MAKE_FLOAT(0.0))
{
return;
}
int n = Floor(d / desiredH);
ElVisFloat h;
if (n <= 1)
{
h = b - a;
n = 1;
}
else
{
h = d / (ElVisFloat)(n - 1);
}
if (traceEnabled)
{
// ELVIS_PRINTF("Total segment range: [%2.15f, %2.15f], segment Id %d\n",
// segmentStart[segmentIndex], segmentEnd[segmentIndex], segmentIndex);
// ELVIS_PRINTF("D = %2.15f, H = %2.15f, N = %d\n", d, h, n);
}
// First test for density identically 0. This means the segment does not
// contribute at
// all to the integral and can be skipped.
FieldEvaluator f;
f.Origin = origin;
f.Direction = dir;
f.ElementId = elementId;
f.ElementType = elementTypeId;
f.sampleCount = numSamples;
f.FieldId = fieldId;
ElVisFloat s0 = f(a);
ElVisFloat d0 = transferFunction->Sample(eDensity, s0);
ElVisFloat3 color0 = transferFunction->SampleColor(s0);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * color0 * d0 * atten;
accumulatedDensity += d0 * h;
for (int i = 1; i < n; ++i)
{
ElVisFloat t = a + i * h;
ElVisFloat sample = f(t);
ElVisFloat densityValue = transferFunction->Sample(eDensity, sample);
ElVisFloat3 sampleColor = transferFunction->SampleColor(sample);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * sampleColor * densityValue * atten;
accumulatedDensity += densityValue * h;
}
densityAccumulator[segmentIndex] = accumulatedDensity;
colorAccumulator[segmentIndex] = color;
}
extern "C" __global__ void Trapezoidal_SingleThreadPerRay(
ElVisFloat3 origin,
const int* __restrict__ segmentElementId,
const int* __restrict__ segmentElementType,
const ElVisFloat3* __restrict__ segmentDirection,
const ElVisFloat* __restrict__ segmentStart,
const ElVisFloat* __restrict__ segmentEnd,
int fieldId,
TransferFunction* transferFunction,
ElVisFloat epsilon,
ElVisFloat desiredH,
uint screenx,
uint screeny,
bool enableTrace,
int tracex,
int tracey,
int* numSamples,
ElVisFloat* __restrict__ densityAccumulator,
ElVisFloat3* __restrict__ colorAccumulator)
{
int2 trace = make_int2(tracex, tracey);
uint2 pixel;
pixel.x = blockIdx.x * blockDim.x + threadIdx.x;
pixel.y = blockIdx.y * blockDim.y + threadIdx.y;
bool traceEnabled =
(pixel.x == trace.x && pixel.y == trace.y && enableTrace);
uint2 screen = make_uint2(screenx, screeny);
if (pixel.x >= screen.x || pixel.y >= screen.y)
{
return;
}
int segmentIndex = pixel.x + screen.x * pixel.y;
if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0))
{
return;
}
int elementId = segmentElementId[segmentIndex];
if (elementId == -1)
{
return;
}
int elementTypeId = segmentElementType[segmentIndex];
if (traceEnabled)
{
// ELVIS_PRINTF("Trapezoidal_SingleThreadPerRay: Processing segment id
// %d\n", segmentIndex);
}
ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex];
ElVisFloat3 color = colorAccumulator[segmentIndex];
ElVisFloat a = segmentStart[segmentIndex];
ElVisFloat b = segmentEnd[segmentIndex];
ElVisFloat3 dir = segmentDirection[segmentIndex];
ElVisFloat d = (b - a);
if (d == MAKE_FLOAT(0.0))
{
return;
}
int n = Floor(d / desiredH);
ElVisFloat h;
if (n == 0)
{
h = b - a;
n = 1;
}
else
{
h = d / (ElVisFloat)(n);
}
// First test for density identically 0. This means the segment does not
// contribute at
// all to the integral and can be skipped.
FieldEvaluator f;
f.Origin = origin;
f.Direction = dir;
f.ElementId = elementId;
f.ElementType = elementTypeId;
f.sampleCount = numSamples;
f.FieldId = fieldId;
ElVisFloat s0 = f(a);
ElVisFloat3 color0 = transferFunction->SampleColor(s0);
ElVisFloat d0 = transferFunction->Sample(eDensity, s0);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * MAKE_FLOAT(.5) * color0 * d0 * atten;
for (int i = 1; i < n; ++i)
{
ElVisFloat t = a + i * h;
ElVisFloat sample = f(t);
ElVisFloat d1 = transferFunction->Sample(eDensity, sample);
accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + d1);
ElVisFloat3 colorSample = transferFunction->SampleColor(sample);
ElVisFloat atten = expf(-accumulatedDensity);
color += h * colorSample * d1 * atten;
d0 = d1;
}
ElVisFloat sn = f(b);
ElVisFloat3 colorn = transferFunction->SampleColor(sn);
ElVisFloat dn = transferFunction->Sample(eDensity, sn);
accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + dn);
atten = expf(-accumulatedDensity);
color += h * MAKE_FLOAT(.5) * colorn * dn * atten;
densityAccumulator[segmentIndex] = accumulatedDensity;
colorAccumulator[segmentIndex] = color;
}
}
#endif // ELVIS_VOLUME_RENDERING_SINGLE_RAY_PER_SEGMENT_CU
|
2760032586ee333810f29f63cf8abcbd107d9966.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
namespace GameOfLifeCUDALibrary {
// Device kernel stub: intended to initialize a width x height world grid.
// NOTE(review): body is empty — world initialization is not implemented yet.
__global__ void InitWorld(int width, int height)
{
}
// Host entry point for world initialization.
// NOTE(review): currently a no-op; it never launches the InitWorld kernel.
void init_world_cuda(int width, int height)
{
}
} | 2760032586ee333810f29f63cf8abcbd107d9966.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
namespace GameOfLifeCUDALibrary {
// Device kernel stub: intended to initialize a width x height world grid.
// NOTE(review): body is empty — world initialization is not implemented yet.
__global__ void InitWorld(int width, int height)
{
}
// Host entry point for world initialization.
// NOTE(review): currently a no-op; it never launches the InitWorld kernel.
void init_world_cuda(int width, int height)
{
}
} |
e7abfcc07cb73a0d2cd350bab34596f3e3522331.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/zgemv_fermi.cu, normal z -> c, Thu Oct 8 23:05:32 2020
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
#define PRECISION_c
#include "gemv_template_device.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/******************************************************************************/
// NoTrans kernel
// NoTrans GEMV kernel: y := alpha*A*x + beta*y, delegating to the shared
// gemvn device template.  Expects a DIM_X x DIM_Y thread block and a 1-D
// grid of ceil(m / TILE_SIZE) blocks (see cgemvn_template_fermi).
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
__global__ void
cgemvn_template_kernel_fermi(
    int m, int n, magmaFloatComplex alpha,
    const magmaFloatComplex * __restrict__ A, int lda,
    const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
    magmaFloatComplex * __restrict__ y, int incy)
{
// BUG FIX (HIP port): __CUDA_ARCH__ is defined only by the NVIDIA device
// compiler, so on an AMD (hip-clang) device pass the original guard compiled
// the body away and the kernel silently did nothing.  Keep the Fermi check
// for CUDA and additionally enable the body for HIP device compilation.
#if (__CUDA_ARCH__ >= 200) || defined(__HIP_DEVICE_COMPILE__)
    gemvn_template_device<magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE>
        (m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
/******************************************************************************/
// Trans/ConjTrans kernel
// Trans/ConjTrans GEMV kernel: y := alpha*op(A)*x + beta*y, where op is
// selected at compile time by the `trans` template parameter.  Delegates to
// the shared gemvc device template (see cgemvc_template_fermi launcher).
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans>
__global__ void
cgemvc_template_kernel_fermi(
    int m, int n, magmaFloatComplex alpha,
    const magmaFloatComplex * __restrict__ A, int lda,
    const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
    magmaFloatComplex * __restrict__ y, int incy)
{
// BUG FIX (HIP port): __CUDA_ARCH__ is defined only by the NVIDIA device
// compiler; without the extra clause the body was compiled out on AMD
// devices, producing a kernel that does nothing.
#if (__CUDA_ARCH__ >= 200) || defined(__HIP_DEVICE_COMPILE__)
    gemvc_template_device< magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE, trans >
        (m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
/******************************************************************************/
// NoTrans CPU driver
// Host launcher for the NoTrans kernel: one DIM_X x DIM_Y block per
// TILE_SIZE rows of A, enqueued asynchronously on `queue`'s stream.
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvn_template_fermi(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 );
dim3 threads( DIM_X, DIM_Y );
// NOTE(review): queue->cuda_stream() survived hipification — confirm the
// HIP build of magma_queue_t still exposes this accessor (HIP ports
// typically rename it hip_stream()).
hipLaunchKernelGGL(( cgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
/******************************************************************************/
// Trans/ConjTrans CPU driver
// Host launcher for the Trans/ConjTrans kernel: one DIM_X x DIM_Y block per
// TILE_SIZE *columns* of A (output length is n).  The runtime `trans` value
// selects which compile-time kernel instantiation to launch.
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvc_template_fermi(
magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 );
dim3 threads ( DIM_X, DIM_Y );
// Conjugate-transpose vs. plain transpose is a template parameter, so the
// branch only picks the instantiation; both run on queue's stream.
if (trans == MagmaConjTrans) {
hipLaunchKernelGGL(( cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
hipLaunchKernelGGL(( cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
/***************************************************************************//**
Purpose
-------
CGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy COMPLEX array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv
*******************************************************************************/
// Public entry point; see the doc block above for the full contract.
// Validates arguments LAPACK-style (negative info encodes the bad argument's
// position), then dispatches to a tuned template configuration.
extern "C" void
magmablas_cgemv(
magma_trans_t trans, magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
// NOTE(review): LAPACK convention is ldda < max(1,m); ldda == 0 is
// accepted here when m == 0 — confirm that is intended.
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
// version(s,v) expands to the tuned config type s_V_v from the gemv_config
// headers; N_V_106 / T_V_189 fix DIM_X, DIM_Y and TILE_SIZE.
if ( trans == MagmaNoTrans ) {
cgemvn_template_fermi<version(N, 106)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
else {
cgemvc_template_fermi<version(T, 189)>
( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
| e7abfcc07cb73a0d2cd350bab34596f3e3522331.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/zgemv_fermi.cu, normal z -> c, Thu Oct 8 23:05:32 2020
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#include "magma_templates.h"
#define PRECISION_c
#include "gemv_template_device.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/******************************************************************************/
// NoTrans kernel
// NoTrans GEMV kernel: y := alpha*A*x + beta*y via the shared gemvn device
// template.  Grid: ceil(m / TILE_SIZE) blocks of DIM_X x DIM_Y threads
// (see cgemvn_template_fermi).  Body requires compute capability >= 2.0.
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
__global__ void
cgemvn_template_kernel_fermi(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvn_template_device<magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
// Trans/ConjTrans kernel
// Trans/ConjTrans GEMV kernel: y := alpha*op(A)*x + beta*y, with op selected
// at compile time by the `trans` template parameter.  Delegates to the
// shared gemvc device template; body requires compute capability >= 2.0.
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans>
__global__ void
cgemvc_template_kernel_fermi(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
#if (__CUDA_ARCH__ >= 200)
gemvc_template_device< magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE, trans >
(m, n, alpha, A, lda, x, incx, beta, y, incy);
#endif /* (__CUDA_ARCH__ >= 200) */
}
/******************************************************************************/
// NoTrans CPU driver
// Host launcher for the NoTrans kernel: one DIM_X x DIM_Y block per
// TILE_SIZE rows of A, enqueued asynchronously on `queue`'s CUDA stream.
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvn_template_fermi(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 );
dim3 threads( DIM_X, DIM_Y );
cgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
/******************************************************************************/
// Trans/ConjTrans CPU driver
// Host launcher for the Trans/ConjTrans kernel: one DIM_X x DIM_Y block per
// TILE_SIZE *columns* of A (output length is n).  The runtime `trans` value
// only selects which compile-time kernel instantiation to launch.
template<const int DIM_X, const int DIM_Y, const int TILE_SIZE>
void
cgemvc_template_fermi(
magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, magma_int_t lda,
const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, magma_int_t incy,
magma_queue_t queue)
{
dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 );
dim3 threads ( DIM_X, DIM_Y );
if (trans == MagmaConjTrans) {
cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
}
/***************************************************************************//**
Purpose
-------
CGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy COMPLEX array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv
*******************************************************************************/
// Public entry point; see the doc block above for the full contract.
// Validates arguments LAPACK-style (negative info encodes the bad argument's
// position), then dispatches to a tuned template configuration.
extern "C" void
magmablas_cgemv(
magma_trans_t trans, magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
// NOTE(review): LAPACK convention is ldda < max(1,m); ldda == 0 is
// accepted here when m == 0 — confirm that is intended.
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
// version(s,v) expands to the tuned config type s_V_v from the gemv_config
// headers; N_V_106 / T_V_189 fix DIM_X, DIM_Y and TILE_SIZE.
if ( trans == MagmaNoTrans ) {
cgemvn_template_fermi<version(N, 106)>
( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
else {
cgemvc_template_fermi<version(T, 189)>
( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
}
}
|
38839ca5861540c46488290d339b8883726c6c6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
// Scales each of the first `size` elements of px by k, in place.
// One thread per element; IDX is the project-wide flat global index macro.
__global__ void inplace_multiply_const_dev(
    float k, std::uint32_t size, float *px) {
  const std::uint32_t idx = IDX;
  if (idx >= size) return;
  px[idx] = px[idx] * k;
}
} // namespace
namespace primitiv {
namespace devices {
// Multiplies every element of tensor `x` by the scalar k, in place, on this
// device.  Launches GRID_SIZE(size, dim1_x_) blocks of dim1_x_ threads on
// the default stream after binding the owning device.
void CUDA::inplace_multiply_const_impl(float k, Tensor &x) {
const std::uint32_t size = x.shape().size();
const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::inplace_multiply_const_dev), dim3(g1), dim3(dim1_x_), 0, 0, k, size, MDATA(x));
}
} // namespace devices
} // namespace primitiv
| 38839ca5861540c46488290d339b8883726c6c6b.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
// In-place scale: px[i] *= k for every i < size, one thread per element.
// IDX is the project-wide flat global-thread-index macro.
__global__ void inplace_multiply_const_dev(
    float k, std::uint32_t size, float *px) {
  const std::uint32_t tid = IDX;
  if (tid < size) {
    px[tid] *= k;
  }
}
} // namespace
namespace primitiv {
namespace devices {
// Multiplies every element of tensor `x` by the scalar k, in place, on this
// device.  Launches GRID_SIZE(size, dim1_x_) blocks of dim1_x_ threads on
// the default stream after binding the owning device.
void CUDA::inplace_multiply_const_impl(float k, Tensor &x) {
const std::uint32_t size = x.shape().size();
const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::inplace_multiply_const_dev<<<g1, dim1_x_>>>(k, size, MDATA(x));
}
} // namespace devices
} // namespace primitiv
|
763ca7c58da49dbadefa36b97b9b4510f9b4f377.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/hip/HIPContext.h"
#include <cfloat>
#include <tuple>
namespace at {
namespace native {
// Smaller of two floats.  Unlike std::fmin this does no NaN special-casing:
// if a is NaN the comparison is false and a is returned.
__host__ __device__ __forceinline__ float fmin(float x, float y) {
  if (x > y) {
    return y;
  }
  return x;
}
// Larger of two floats.  Unlike std::fmax this does no NaN special-casing:
// if a is NaN the comparison is false and b is returned.
__host__ __device__ __forceinline__ float fmax(float x, float y) {
  if (x > y) {
    return x;
  }
  return y;
}
// Forward RoI max-pooling kernel.  One thread per output element
// (proposal, channel, ph, pw), covered with a grid-stride loop.
// rois rows are [batch_index, startW, startH, endW, endH]; spatialScale maps
// them into feature-map coordinates.  argmaxes records, per output element,
// the flat h*inputWidth + w index of the winning input within the (n, ch)
// plane, or -1 for an empty region (so backward propagates nothing).
template <typename T>
__global__ void RoiPooling2d_forward_kernel(
    const int outputElements,
    const T *input,
    const T *rois,
    const T spatialScale,
    const int inputChannels,
    const int inputHeight,
    const int inputWidth,
    const int pooledHeight,
    const int pooledWidth,
    T *output,
    int *argmaxes)
{
  for (int linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < outputElements;
       linearIndex += blockDim.x * gridDim.x)
  {
    // Decompose the flat output index into (proposal, channel, ph, pw).
    int pw = linearIndex % pooledWidth;
    int ph = (linearIndex / pooledWidth) % pooledHeight;
    int ch = (linearIndex / pooledWidth / pooledHeight) % inputChannels;
    int proposal = linearIndex / pooledWidth / pooledHeight / inputChannels;
    // Get particular proposal data
    const T *roisOffset = rois + (proposal * 5);
    int n = roisOffset[0];
    int startWidth = llrintf(roisOffset[1] * spatialScale);
    int startHeight = llrintf(roisOffset[2] * spatialScale);
    int endWidth = llrintf(roisOffset[3] * spatialScale);
    int endHeight = llrintf(roisOffset[4] * spatialScale);
    // TODO: fix malformed RoIs to be 1x1
    int roiHeight = endHeight - startHeight;
    int roiWidth = endWidth - startWidth;
    // Size of one pooling tile for this particular RoI.
    T tileHeight = static_cast<T>(roiHeight) / static_cast<T>(pooledHeight);
    T tileWidth = static_cast<T>(roiWidth) / static_cast<T>(pooledWidth);
    // Offset of this thread's tile inside the pooled region.
    int tileHStart = static_cast<int>(floorf(static_cast<T>(ph) * tileHeight));
    int tileWStart = static_cast<int>(floorf(static_cast<T>(pw) * tileWidth));
    int tileHEnd = static_cast<int>(ceilf(static_cast<T>(ph + 1) * tileHeight));
    int tileWEnd = static_cast<int>(ceilf(static_cast<T>(pw + 1) * tileWidth));
    // Translate into image coordinates and clamp to the image boundaries.
    tileHStart = fmin(fmax(tileHStart + startHeight, 0), inputHeight);
    tileWStart = fmin(fmax(tileWStart + startWidth, 0), inputWidth);
    tileHEnd = fmin(fmax(tileHEnd + startHeight, 0), inputHeight);
    tileWEnd = fmin(fmax(tileWEnd + startWidth, 0), inputWidth);
    bool isEmpty = (tileHStart >= tileHEnd) || (tileWStart >= tileWEnd);
    // BUG FIX: the running maximum used FLT_MIN, the smallest *positive*
    // normal float, as its sentinel.  A RoI whose activations are all
    // negative never updated max/maxIdx, yielding a bogus FLT_MIN output and
    // argmax == -1 (no gradient).  -FLT_MAX lets any finite value win.
    T max = isEmpty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxIdx = -1;
    const T *inputOffset = input + ((n * inputChannels + ch) * inputHeight * inputWidth);
    for (int th = tileHStart; th < tileHEnd; ++th) {
      for (int tw = tileWStart; tw < tileWEnd; ++tw) {
        int index = (th * inputWidth) + tw;
        if (inputOffset[index] > max) {
          max = inputOffset[index];
          maxIdx = index;
        }
      }
    }
    output[linearIndex] = max;
    argmaxes[linearIndex] = maxIdx;
  }
}
// Forward RoI max-pooling driver.
// input: NCHW feature map; rois: (num_rois x 5) rows of
// [batch_index, startW, startH, endW, endH].
// Returns (output, argmaxes), both (num_rois, C, pooledHeight, pooledWidth);
// argmaxes records per-element max positions for the backward pass.
std::tuple<Tensor, Tensor> RoiPooling2d_forward_cuda(
    const Tensor& input,
    const Tensor& rois,
    int64_t pooledHeight,
    int64_t pooledWidth,
    double spatialScale)
{
  // Input is the output of the last convolutional layer in the backbone, so
  // it should be in NCHW format.
  AT_CHECK(input.ndimension() == 4, "Input to RoI Pooling should be a NCHW Tensor");
  AT_CHECK(rois.ndimension() == 2, "RoI Proposals should be a 2D Tensor, (batch_sz x proposals)");
  // BUG FIX: error message previously read "enH" instead of "endH".
  AT_CHECK(rois.size(1) == 5, "Proposals should be of the form [batch_index startW startH endW endH]");
  auto proposals = rois.size(0);
  auto inputChannels = input.size(1);
  auto inputHeight = input.size(2);
  auto inputWidth = input.size(3);
  // Output Tensor is (num_rois, C, pooledHeight, pooledWidth)
  auto output = input.type().tensor({proposals, inputChannels, pooledHeight, pooledWidth});
  // TODO: need some mechanism for determining train vs. test
  // During training we must store the argmaxes for backward, so argmaxes has
  // the same shape as the output.
  auto argmaxes = input.type().toScalarType(kInt).tensor({proposals, inputChannels, pooledHeight, pooledWidth});
  AT_CHECK(input.is_contiguous(), "input must be contiguous");
  AT_CHECK(rois.is_contiguous(), "rois must be contiguous");
  dim3 block(512);
  dim3 grid((output.numel() + 512 - 1) / 512);
  hipLaunchKernelGGL(( RoiPooling2d_forward_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
    output.numel(), input.data<float>(), rois.data<float>(), static_cast<float>(spatialScale), inputChannels,
    inputHeight, inputWidth, pooledHeight, pooledWidth, output.data<float>(), argmaxes.data<int>());
  // BUG FIX: hipGetLastError() clears the sticky error, so calling it a
  // second time inside the failure message reported "success"; capture once.
  const hipError_t err = hipGetLastError();
  AT_CHECK(err == hipSuccess, "RoiPooling2d_forward_kernel failed with error code ", err);
  return std::make_tuple(output, argmaxes);
}
// Backward RoI max-pooling kernel: scatter-adds each gradOutput element into
// gradInput at the argmax position recorded by the forward pass.  One thread
// per gradOutput element, grid-stride loop; atomicAdd handles RoIs that
// overlap in the input.
template <typename T>
__global__ void RoiPooling2d_backward_kernel(
    const int outputElements,
    const T *gradOutput,
    const int *argmaxes,
    const int proposals,
    const T spatialScale,
    const int inputChannels,
    const int inputHeight,
    const int inputWidth,
    const int pooledHeight,
    const int pooledWidth,
    T *gradInput,
    const T *rois)
{
  for (int linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < outputElements;
       linearIndex += blockDim.x * gridDim.x)
  {
    int pw = linearIndex % pooledWidth;
    // BUG FIX: ph was computed as (linearIndex / pooledWidth) / pooledHeight,
    // which is not the pooled-row index; the forward kernel (and the index
    // math below) require the modulo form.
    int ph = (linearIndex / pooledWidth) % pooledHeight;
    int ch = (linearIndex / pooledWidth / pooledHeight) % inputChannels;
    int proposal = linearIndex / pooledWidth / pooledHeight / inputChannels;
    const T *roisOffset = rois + (proposal * 5);
    // n indexes the *image* in the input batch; gradInput is per image.
    int n = roisOffset[0];
    int gradInputOffset = (n * inputChannels + ch) * inputHeight * inputWidth;
    // BUG FIX: gradOutput and argmaxes are laid out per *proposal* (the
    // forward kernel wrote output[linearIndex] in proposal-major order), so
    // their plane offset must use `proposal`, not the image index `n`.
    int gradOutputOffset = (proposal * inputChannels + ch) * pooledHeight * pooledWidth;
    const T* gradOutputShifted = gradOutput + gradOutputOffset;
    T *gradInputShifted = gradInput + gradInputOffset;
    const int *argmaxesShifted = argmaxes + gradOutputOffset;
    int argmax = argmaxesShifted[ph * pooledWidth + pw];
    // argmax == -1 marks an empty pooling region: nothing to backprop.
    if (argmax != -1) {
      atomicAdd(gradInputShifted + argmax, gradOutputShifted[ph * pooledWidth + pw]);
    }
  }
}
// Backward RoI max-pooling driver: routes gradOutput back to the input
// feature map positions recorded in argmaxes.  Returns gradInput with the
// same shape as input.
Tensor RoiPooling2d_backward_cuda(
    const Tensor& input,
    const Tensor& rois,
    int64_t pooledHeight,
    int64_t pooledWidth,
    double spatialScale,
    const Tensor& gradOutput,
    const Tensor& argmaxes)
{
  // TODO: assertions?
  auto proposals = rois.size(0);
  auto inputChannels = input.size(1);
  auto inputHeight = input.size(2);
  auto inputWidth = input.size(3);
  // BUG FIX: the gradient buffer was allocated uninitialized, but the kernel
  // only atomicAdds into positions that won the forward max — every other
  // element held garbage.  It must start from zero.
  auto gradInput = input.type().tensor(input.sizes()).zero_();
  dim3 block(512);
  // Grid sized by gradInput (>= gradOutput elements); surplus threads exit
  // the kernel's grid-stride loop immediately.
  dim3 grid((gradInput.numel() + 512 - 1) / 512);
  hipLaunchKernelGGL(( RoiPooling2d_backward_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
    gradOutput.numel(), gradOutput.data<float>(), argmaxes.data<int>(), proposals,
    static_cast<float>(spatialScale), inputChannels, inputHeight, inputWidth,
    pooledHeight, pooledWidth, gradInput.data<float>(), rois.data<float>());
  // Capture the launch status once: hipGetLastError() clears the error, so a
  // second call inside the message would report "success".
  const hipError_t err = hipGetLastError();
  AT_CHECK(err == hipSuccess, "RoiPooling2d_backward_kernel failed with error code ", err);
  return gradInput;
}
} // at::native
} // at
| 763ca7c58da49dbadefa36b97b9b4510f9b4f377.cu | #include "ATen/ATen.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/cuda/CUDAContext.h"
#include <cfloat>
#include <tuple>
namespace at {
namespace native {
// Smaller of two floats.  Unlike std::fmin this does no NaN special-casing:
// if a is NaN the comparison is false and a is returned.
__host__ __device__ __forceinline__ float fmin(float a, float b) {
  float smaller = a;
  if (a > b) {
    smaller = b;
  }
  return smaller;
}
// Larger of two floats.  Unlike std::fmax this does no NaN special-casing:
// if a is NaN the comparison is false and b is returned.
__host__ __device__ __forceinline__ float fmax(float a, float b) {
  float larger = b;
  if (a > b) {
    larger = a;
  }
  return larger;
}
// Forward RoI max-pooling kernel.  One thread per output element
// (proposal, channel, ph, pw), covered with a grid-stride loop.
// rois rows are [batch_index, startW, startH, endW, endH]; spatialScale maps
// them into feature-map coordinates.  argmaxes records, per output element,
// the flat h*inputWidth + w index of the winning input within the (n, ch)
// plane, or -1 for an empty region (so backward propagates nothing).
template <typename T>
__global__ void RoiPooling2d_forward_kernel(
    const int outputElements,
    const T *input,
    const T *rois,
    const T spatialScale,
    const int inputChannels,
    const int inputHeight,
    const int inputWidth,
    const int pooledHeight,
    const int pooledWidth,
    T *output,
    int *argmaxes)
{
  for (int linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < outputElements;
       linearIndex += blockDim.x * gridDim.x)
  {
    // Decompose the flat output index into (proposal, channel, ph, pw).
    int pw = linearIndex % pooledWidth;
    int ph = (linearIndex / pooledWidth) % pooledHeight;
    int ch = (linearIndex / pooledWidth / pooledHeight) % inputChannels;
    int proposal = linearIndex / pooledWidth / pooledHeight / inputChannels;
    // Get particular proposal data
    const T *roisOffset = rois + (proposal * 5);
    int n = roisOffset[0];
    int startWidth = llrintf(roisOffset[1] * spatialScale);
    int startHeight = llrintf(roisOffset[2] * spatialScale);
    int endWidth = llrintf(roisOffset[3] * spatialScale);
    int endHeight = llrintf(roisOffset[4] * spatialScale);
    // TODO: fix malformed RoIs to be 1x1
    int roiHeight = endHeight - startHeight;
    int roiWidth = endWidth - startWidth;
    // Size of one pooling tile for this particular RoI.
    T tileHeight = static_cast<T>(roiHeight) / static_cast<T>(pooledHeight);
    T tileWidth = static_cast<T>(roiWidth) / static_cast<T>(pooledWidth);
    // Offset of this thread's tile inside the pooled region.
    int tileHStart = static_cast<int>(floorf(static_cast<T>(ph) * tileHeight));
    int tileWStart = static_cast<int>(floorf(static_cast<T>(pw) * tileWidth));
    int tileHEnd = static_cast<int>(ceilf(static_cast<T>(ph + 1) * tileHeight));
    int tileWEnd = static_cast<int>(ceilf(static_cast<T>(pw + 1) * tileWidth));
    // Translate into image coordinates and clamp to the image boundaries.
    tileHStart = fmin(fmax(tileHStart + startHeight, 0), inputHeight);
    tileWStart = fmin(fmax(tileWStart + startWidth, 0), inputWidth);
    tileHEnd = fmin(fmax(tileHEnd + startHeight, 0), inputHeight);
    tileWEnd = fmin(fmax(tileWEnd + startWidth, 0), inputWidth);
    bool isEmpty = (tileHStart >= tileHEnd) || (tileWStart >= tileWEnd);
    // BUG FIX: the running maximum used FLT_MIN, the smallest *positive*
    // normal float, as its sentinel.  A RoI whose activations are all
    // negative never updated max/maxIdx, yielding a bogus FLT_MIN output and
    // argmax == -1 (no gradient).  -FLT_MAX lets any finite value win.
    T max = isEmpty ? 0 : -FLT_MAX;
    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
    int maxIdx = -1;
    const T *inputOffset = input + ((n * inputChannels + ch) * inputHeight * inputWidth);
    for (int th = tileHStart; th < tileHEnd; ++th) {
      for (int tw = tileWStart; tw < tileWEnd; ++tw) {
        int index = (th * inputWidth) + tw;
        if (inputOffset[index] > max) {
          max = inputOffset[index];
          maxIdx = index;
        }
      }
    }
    output[linearIndex] = max;
    argmaxes[linearIndex] = maxIdx;
  }
}
// Forward RoI max-pooling driver.
// input: NCHW feature map; rois: (num_rois x 5) rows of
// [batch_index, startW, startH, endW, endH].
// Returns (output, argmaxes), both (num_rois, C, pooledHeight, pooledWidth);
// argmaxes records per-element max positions for the backward pass.
std::tuple<Tensor, Tensor> RoiPooling2d_forward_cuda(
    const Tensor& input,
    const Tensor& rois,
    int64_t pooledHeight,
    int64_t pooledWidth,
    double spatialScale)
{
  // Input is the output of the last convolutional layer in the backbone, so
  // it should be in NCHW format.
  AT_CHECK(input.ndimension() == 4, "Input to RoI Pooling should be a NCHW Tensor");
  AT_CHECK(rois.ndimension() == 2, "RoI Proposals should be a 2D Tensor, (batch_sz x proposals)");
  // BUG FIX: error message previously read "enH" instead of "endH".
  AT_CHECK(rois.size(1) == 5, "Proposals should be of the form [batch_index startW startH endW endH]");
  auto proposals = rois.size(0);
  auto inputChannels = input.size(1);
  auto inputHeight = input.size(2);
  auto inputWidth = input.size(3);
  // Output Tensor is (num_rois, C, pooledHeight, pooledWidth)
  auto output = input.type().tensor({proposals, inputChannels, pooledHeight, pooledWidth});
  // TODO: need some mechanism for determining train vs. test
  // During training we must store the argmaxes for backward, so argmaxes has
  // the same shape as the output.
  auto argmaxes = input.type().toScalarType(kInt).tensor({proposals, inputChannels, pooledHeight, pooledWidth});
  AT_CHECK(input.is_contiguous(), "input must be contiguous");
  AT_CHECK(rois.is_contiguous(), "rois must be contiguous");
  dim3 block(512);
  dim3 grid((output.numel() + 512 - 1) / 512);
  RoiPooling2d_forward_kernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
    output.numel(), input.data<float>(), rois.data<float>(), static_cast<float>(spatialScale), inputChannels,
    inputHeight, inputWidth, pooledHeight, pooledWidth, output.data<float>(), argmaxes.data<int>());
  // BUG FIX: cudaGetLastError() clears the sticky error, so calling it a
  // second time inside the failure message reported "success"; capture once.
  const cudaError_t err = cudaGetLastError();
  AT_CHECK(err == cudaSuccess, "RoiPooling2d_forward_kernel failed with error code ", err);
  return std::make_tuple(output, argmaxes);
}
// Backward RoI max-pooling kernel: scatter-adds each gradOutput element into
// gradInput at the argmax position recorded by the forward pass.  One thread
// per gradOutput element, grid-stride loop; atomicAdd handles RoIs that
// overlap in the input.
template <typename T>
__global__ void RoiPooling2d_backward_kernel(
    const int outputElements,
    const T *gradOutput,
    const int *argmaxes,
    const int proposals,
    const T spatialScale,
    const int inputChannels,
    const int inputHeight,
    const int inputWidth,
    const int pooledHeight,
    const int pooledWidth,
    T *gradInput,
    const T *rois)
{
  for (int linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < outputElements;
       linearIndex += blockDim.x * gridDim.x)
  {
    int pw = linearIndex % pooledWidth;
    // BUG FIX: ph was computed as (linearIndex / pooledWidth) / pooledHeight,
    // which is not the pooled-row index; the forward kernel (and the index
    // math below) require the modulo form.
    int ph = (linearIndex / pooledWidth) % pooledHeight;
    int ch = (linearIndex / pooledWidth / pooledHeight) % inputChannels;
    int proposal = linearIndex / pooledWidth / pooledHeight / inputChannels;
    const T *roisOffset = rois + (proposal * 5);
    // n indexes the *image* in the input batch; gradInput is per image.
    int n = roisOffset[0];
    int gradInputOffset = (n * inputChannels + ch) * inputHeight * inputWidth;
    // BUG FIX: gradOutput and argmaxes are laid out per *proposal* (the
    // forward kernel wrote output[linearIndex] in proposal-major order), so
    // their plane offset must use `proposal`, not the image index `n`.
    int gradOutputOffset = (proposal * inputChannels + ch) * pooledHeight * pooledWidth;
    const T* gradOutputShifted = gradOutput + gradOutputOffset;
    T *gradInputShifted = gradInput + gradInputOffset;
    const int *argmaxesShifted = argmaxes + gradOutputOffset;
    int argmax = argmaxesShifted[ph * pooledWidth + pw];
    // argmax == -1 marks an empty pooling region: nothing to backprop.
    if (argmax != -1) {
      atomicAdd(gradInputShifted + argmax, gradOutputShifted[ph * pooledWidth + pw]);
    }
  }
}
// Backward RoI max-pooling driver: routes gradOutput back to the input
// feature map positions recorded in argmaxes.  Returns gradInput with the
// same shape as input.
Tensor RoiPooling2d_backward_cuda(
    const Tensor& input,
    const Tensor& rois,
    int64_t pooledHeight,
    int64_t pooledWidth,
    double spatialScale,
    const Tensor& gradOutput,
    const Tensor& argmaxes)
{
  // TODO: assertions?
  auto proposals = rois.size(0);
  auto inputChannels = input.size(1);
  auto inputHeight = input.size(2);
  auto inputWidth = input.size(3);
  // BUG FIX: the gradient buffer was allocated uninitialized, but the kernel
  // only atomicAdds into positions that won the forward max — every other
  // element held garbage.  It must start from zero.
  auto gradInput = input.type().tensor(input.sizes()).zero_();
  dim3 block(512);
  // Grid sized by gradInput (>= gradOutput elements); surplus threads exit
  // the kernel's grid-stride loop immediately.
  dim3 grid((gradInput.numel() + 512 - 1) / 512);
  RoiPooling2d_backward_kernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
    gradOutput.numel(), gradOutput.data<float>(), argmaxes.data<int>(), proposals,
    static_cast<float>(spatialScale), inputChannels, inputHeight, inputWidth,
    pooledHeight, pooledWidth, gradInput.data<float>(), rois.data<float>());
  // Capture the launch status once: cudaGetLastError() clears the error, so
  // a second call inside the message would report "success".
  const cudaError_t err = cudaGetLastError();
  AT_CHECK(err == cudaSuccess, "RoiPooling2d_backward_kernel failed with error code ", err);
  return gradInput;
}
} // at::native
} // at
|
c567c6474962115529454a080d6c3b74139afa0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2010 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "operators.h"
#include "timer.h"
// Shared-memory variant of do_deriv: computes
//   p[i] = source[i] + (from[i-1] - 2*from[i] + from[i+1]) * inv_dx
// with periodic wrap at both ends of the n-element array, staging each
// block's slice of `from` in a shared tile so interior stencil reads hit
// shared memory instead of global.
// PRECONDITION: blockDim.x == 256 — both the cache size and the hard-coded
// `tid == 255` boundary test assume it.
// NOTE(review): callers pass 1.0f/dx as inv_dx, but a second-difference
// Laplacian is conventionally scaled by 1/dx^2 — confirm intended scaling.
__global__ void do_deriv_opt(float *p, int n, float *from, float *source, float inv_dx)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float cache[256];
if (idx < n) {
cache[tid] = from[idx];
}
// Barrier is outside the guard so all threads of the block reach it.
__syncthreads();
if (idx < n) {
float s = source[idx];
float accum;
// Left neighbor: block boundary (and the periodic wrap at idx == 0)
// must read global memory; interior threads read the shared tile.
if (tid == 0) {
int i_m = idx - 1;
if (i_m == -1) i_m = n-1;
accum = from[i_m];
}
else
accum = cache[tid-1];
// Right neighbor: same scheme at the other edge of the tile / array.
if (tid == 255 || idx == (n-1)) {
int i_p = idx + 1;
if (i_p == n) i_p = 0;
accum += from[i_p];
}
else
accum += cache[tid+1];
// accum now holds from[i-1] + from[i+1]; finish the stencil.
accum -= 2*cache[tid];
accum *= inv_dx;
accum += s;
p[idx] = accum;
}
}
// Computes p[i] = source[i] + (from[i-1] - 2*from[i] + from[i+1]) * inv_dx
// for i < n, with periodic wrap-around at both ends.  One thread per element.
__global__ void do_deriv(float *p, int n, float *from, float *source, float inv_dx)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) return;
    const int left  = (gid == 0) ? n - 1 : gid - 1;
    const int right = (gid == n - 1) ? 0 : gid + 1;
    const float lap = from[left] - 2 * from[gid] + from[right];
    p[gid] = source[gid] + lap * inv_dx;
}
// Scaled accumulate: p[i] += from[i] * scale for i < n, one thread each.
__global__ void add_mult(float *p, int n, float *from, float scale)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    p[i] = p[i] + from[i] * scale;
}
// Zero-fills the first n floats of p, one thread per element.
__global__ void set_to_zero(float *p, int n)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        p[i] = 0;
    }
}
// Writes an inclusive-range indicator: p[i] = 1 for start <= i <= end,
// and 0 elsewhere, for every i < n.
__global__ void set_in_range(float *p, int n, int start, int end)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    const bool inside = (i >= start) && (i <= end);
    p[i] = inside ? 1 : 0;
}
// Forward-Euler integration of a 1-D heat/diffusion equation with a constant
// source term, using the shared-memory stencil kernel (do_deriv_opt).
// phi is zeroed, the source is set to 1 on [n/2, n/2 + n/4], then 100 Euler
// steps of phi += dphidt * dt are run on the GPU and the wall time printed.
// NOTE(review): kernel launches are asynchronous and there is no device
// synchronize before timer.stop(), so the measured time may exclude GPU
// execution — confirm whether cpu_timer synchronizes.
// NOTE(review): hphi is allocated but never used.
int main_gpu2(int argc, const char **argv)
{
int n = 1024 * 1024;
float dt = 0.1f;
float dx = 0.5f;
DeviceArray1D phi, dphidt, source;
HostArray1D hphi;
phi.allocate(n,0);
dphidt.allocate(n,0);
source.allocate(n,0);
hphi.allocate(n,0);
// Initial condition: phi = 0 everywhere; unit source over the middle band.
hipLaunchKernelGGL(( set_to_zero), dim3((n+255) / 256), dim3(256), 0, 0, &phi.at(0), n);
hipLaunchKernelGGL(( set_in_range), dim3((n+255)/256), dim3(256), 0, 0, &source.at(0), n, n/2, n/2 + n/4);
cpu_timer timer;
timer.start();
for (int step = 0; step < 100; step++) {
hipLaunchKernelGGL(( do_deriv_opt), dim3((n+255)/256), dim3(256), 0, 0, &dphidt.at(0), n, &phi.at(0), &source.at(0), 1.0f/dx);
hipLaunchKernelGGL(( add_mult), dim3((n+255)/256), dim3(256), 0, 0, &phi.at(0), n, &dphidt.at(0), dt);
}
timer.stop();
printf("Elapsed: %f\n", timer.elapsed_ms());
return 0;
}
/* GPU benchmark driver using the naive all-global-memory stencil (do_deriv).
 * Same setup as main_gpu2: 100 forward-Euler steps over n = 1M points,
 * prints elapsed wall time in ms, returns 0. */
int main_gpu1(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    DeviceArray1D phi, dphidt, source;
    HostArray1D hphi;  /* NOTE(review): allocated but never used here */
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    hphi.allocate(n,0);
    /* phi := 0; source := 1 on [n/2, 3n/4], 0 elsewhere */
    hipLaunchKernelGGL(( set_to_zero), dim3((n+255) / 256), dim3(256), 0, 0, &phi.at(0), n);
    hipLaunchKernelGGL(( set_in_range), dim3((n+255)/256), dim3(256), 0, 0, &source.at(0), n, n/2, n/2 + n/4);
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        hipLaunchKernelGGL(( do_deriv), dim3((n+255)/256), dim3(256), 0, 0, &dphidt.at(0), n, &phi.at(0), &source.at(0), 1.0f/dx);
        hipLaunchKernelGGL(( add_mult), dim3((n+255)/256), dim3(256), 0, 0, &phi.at(0), n, &dphidt.at(0), dt);
    }
    timer.stop();
    /* NOTE(review): no device synchronize before timer.stop() -- see
     * main_gpu2; confirm cpu_timer semantics in timer.h. */
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* CPU reference implementation of the same integration: 100 forward-Euler
 * steps of dphi/dt = (phi[i-1] - 2*phi[i] + phi[i+1])/dx + source[i] with
 * periodic boundaries over n = 1M points. Prints elapsed ms, returns 0. */
int main_cpu(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    HostArray1D phi, dphidt, source;
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    int i;
    /* phi := 0 everywhere */
    for (i=0; i < n; i++)
        phi.at(i) = 0;
    /* source := indicator of [n/2, 3n/4] (matches set_in_range on the GPU) */
    for (i=0; i < n; i++)
        source.at(i) = 0;
    for (i=n/2; i <= n/2 + n/4; i++)
        source.at(i) = 1;
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        /* periodic second-difference stencil plus source term */
        for (i=0 ; i < n; i++) {
            int i_m = (i-1+n)%n;
            int i_p = (i+1)%n;
            dphidt.at(i) = (phi.at(i_m) - 2*phi.at(i) + phi.at(i_p))/dx + source.at(i);
        }
        /* forward Euler update */
        for(i=0; i < n; i++)
            phi.at(i) += dt * dphidt.at(i);
    }
    timer.stop();
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* Expression-template ("metaprogramming") version: the same integration
 * written with the operator DSL from operators.h (constant(), identity(),
 * inrange(), and phi[k] as a shifted read). Prints elapsed ms, returns 0.
 * NOTE(review): the DSL's evaluation strategy (fused kernels vs. temporaries)
 * is defined in operators.h, which is not visible here.
 * NOTE(review): constant(1/dx) performs float division (dx is float),
 * matching the 1.0f/dx passed to the explicit kernels. */
int main_metaprog(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    DeviceArray1D phi, dphidt, source;
    HostArray1D hphi;  /* NOTE(review): allocated but never used here */
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    hphi.allocate(n,0);
    phi = constant(0);
    source = inrange(identity(), constant(n/2), constant(n/2 + n/4));
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        /* dphidt = laplacian(phi)/dx + source, expressed with shifted reads */
        dphidt = (constant(1/dx) * (phi[-1] - constant(2) * phi[0] + phi[1]) + source[0]);
        phi = phi[0] + constant(dt) * dphidt[0];
    }
    timer.stop();
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* Entry point: dispatch to one of the four benchmark drivers based on
 * argv[1] ("cpu", "gpu1", "gpu2" or "meta"). Prints usage and exits with
 * failure when the argument is missing or unrecognized.
 * Fix: the original fell off the end of a non-void function when argv[1]
 * matched none of the options -- undefined behavior / garbage exit code. */
int main(int argc, const char **argv)
{
    if (argc == 1) {
        printf("usage: run [cpu|gpu1|gpu2|meta]\n");
        exit(-1);
    }
    if (strcmp(argv[1], "cpu")==0)
        return main_cpu(argc, argv);
    if (strcmp(argv[1], "gpu1")==0)
        return main_gpu1(argc, argv);
    if (strcmp(argv[1], "gpu2")==0)
        return main_gpu2(argc, argv);
    if (strcmp(argv[1], "meta")==0)
        return main_metaprog(argc, argv);
    /* unknown mode: report and fail instead of falling off the end */
    printf("usage: run [cpu|gpu1|gpu2|meta]\n");
    return 1;
}
| c567c6474962115529454a080d6c3b74139afa0b.cu | /*
* Copyright 2010 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "operators.h"
#include "timer.h"
/* Shared-memory-tiled variant of do_deriv: each 256-thread block stages its
 * slice of `from` into shared memory so interior stencil reads hit on-chip
 * storage. Computes, with periodic (wrap-around) boundaries,
 *   p[i] = source[i] + (from[i-1] - 2*from[i] + from[i+1]) * inv_dx.
 * NOTE(review): assumes blockDim.x == 256 (hard-coded cache size and the
 * tid == 255 edge test) -- the launch configuration must match.
 * NOTE(review): callers pass inv_dx = 1/dx, not 1/dx^2; this matches
 * main_cpu's formula, so it is internally consistent. */
__global__ void do_deriv_opt(float *p, int n, float *from, float *source, float inv_dx)
{
    int tid = threadIdx.x;                            /* index within the block */
    int idx = threadIdx.x + blockIdx.x * blockDim.x;  /* global element index */
    __shared__ float cache[256];                      /* this block's tile of `from` */
    if (idx < n) {
        cache[tid] = from[idx];
    }
    __syncthreads();   /* tile fully populated before any neighbor reads */
    if (idx < n) {
        float s = source[idx];
        float accum;
        if (tid == 0) {
            /* left neighbor lies outside the tile: global read, wrapping
             * periodically at the array start */
            int i_m = idx - 1;
            if (i_m == -1) i_m = n-1;
            accum = from[i_m];
        }
        else
            accum = cache[tid-1];
        if (tid == 255 || idx == (n-1)) {
            /* right neighbor outside the tile (or past the end): global read
             * with periodic wrap at the array end */
            int i_p = idx + 1;
            if (i_p == n) i_p = 0;
            accum += from[i_p];
        }
        else
            accum += cache[tid+1];
        accum -= 2*cache[tid];
        accum *= inv_dx;
        accum += s;
        p[idx] = accum;
    }
}
/* Naive periodic second-difference stencil (all reads from global memory):
 *   p[i] = source[i] + (from[i-1] - 2*from[i] + from[i+1]) * inv_dx,
 * with indices wrapping around at both ends of the n-element arrays. */
__global__ void do_deriv(float *p, int n, float *from, float *source, float inv_dx)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    /* periodic neighbors via modular arithmetic */
    int left  = (idx - 1 + n) % n;
    int right = (idx + 1) % n;
    float lap = from[left] - 2.0f * from[idx] + from[right];
    p[idx] = source[idx] + lap * inv_dx;
}
/* In-place AXPY-style update: p[i] += from[i] * scale for i in [0, n). */
__global__ void add_mult(float *p, int n, float *from, float scale)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    p[i] = p[i] + from[i] * scale;
}
/* Fill p[0..n) with zeros, one element per thread. */
__global__ void set_to_zero(float *p, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    p[i] = 0.0f;
}
/* Indicator fill: p[i] = 1 when start <= i <= end (inclusive), else 0. */
__global__ void set_in_range(float *p, int n, int start, int end)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    p[i] = (i >= start && i <= end) ? 1.0f : 0.0f;
}
/* GPU benchmark driver using the shared-memory-optimized stencil
 * (do_deriv_opt). Integrates dphi/dt = laplacian(phi)/dx + source with
 * forward Euler for 100 steps over n = 1M points, prints elapsed wall
 * time in ms, and returns 0. */
int main_gpu2(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    DeviceArray1D phi, dphidt, source;
    HostArray1D hphi;  /* NOTE(review): allocated but never used here */
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    hphi.allocate(n,0);
    /* phi := 0; source := 1 on [n/2, 3n/4], 0 elsewhere */
    set_to_zero<<<(n+255) / 256, 256>>>(&phi.at(0), n);
    set_in_range<<<(n+255)/256, 256>>>(&source.at(0), n, n/2, n/2 + n/4);
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        do_deriv_opt<<<(n+255)/256, 256>>>(&dphidt.at(0), n, &phi.at(0), &source.at(0), 1.0f/dx);
        add_mult<<<(n+255)/256, 256>>>(&phi.at(0), n, &dphidt.at(0), dt);
    }
    timer.stop();
    /* NOTE(review): no cudaDeviceSynchronize before timer.stop(); kernel
     * launches are asynchronous, so this may measure launch overhead rather
     * than compute -- confirm cpu_timer semantics in timer.h. */
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* GPU benchmark driver using the naive all-global-memory stencil (do_deriv).
 * Same setup as main_gpu2: 100 forward-Euler steps over n = 1M points,
 * prints elapsed wall time in ms, returns 0. */
int main_gpu1(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    DeviceArray1D phi, dphidt, source;
    HostArray1D hphi;  /* NOTE(review): allocated but never used here */
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    hphi.allocate(n,0);
    /* phi := 0; source := 1 on [n/2, 3n/4], 0 elsewhere */
    set_to_zero<<<(n+255) / 256, 256>>>(&phi.at(0), n);
    set_in_range<<<(n+255)/256, 256>>>(&source.at(0), n, n/2, n/2 + n/4);
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        do_deriv<<<(n+255)/256, 256>>>(&dphidt.at(0), n, &phi.at(0), &source.at(0), 1.0f/dx);
        add_mult<<<(n+255)/256, 256>>>(&phi.at(0), n, &dphidt.at(0), dt);
    }
    timer.stop();
    /* NOTE(review): no cudaDeviceSynchronize before timer.stop() -- see
     * main_gpu2; confirm cpu_timer semantics in timer.h. */
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* CPU reference implementation of the same integration: 100 forward-Euler
 * steps of dphi/dt = (phi[i-1] - 2*phi[i] + phi[i+1])/dx + source[i] with
 * periodic boundaries over n = 1M points. Prints elapsed ms, returns 0. */
int main_cpu(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    HostArray1D phi, dphidt, source;
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    int i;
    /* phi := 0 everywhere */
    for (i=0; i < n; i++)
        phi.at(i) = 0;
    /* source := indicator of [n/2, 3n/4] (matches set_in_range on the GPU) */
    for (i=0; i < n; i++)
        source.at(i) = 0;
    for (i=n/2; i <= n/2 + n/4; i++)
        source.at(i) = 1;
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        /* periodic second-difference stencil plus source term */
        for (i=0 ; i < n; i++) {
            int i_m = (i-1+n)%n;
            int i_p = (i+1)%n;
            dphidt.at(i) = (phi.at(i_m) - 2*phi.at(i) + phi.at(i_p))/dx + source.at(i);
        }
        /* forward Euler update */
        for(i=0; i < n; i++)
            phi.at(i) += dt * dphidt.at(i);
    }
    timer.stop();
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* Expression-template ("metaprogramming") version: the same integration
 * written with the operator DSL from operators.h (constant(), identity(),
 * inrange(), and phi[k] as a shifted read). Prints elapsed ms, returns 0.
 * NOTE(review): the DSL's evaluation strategy (fused kernels vs. temporaries)
 * is defined in operators.h, which is not visible here.
 * NOTE(review): constant(1/dx) performs float division (dx is float),
 * matching the 1.0f/dx passed to the explicit kernels. */
int main_metaprog(int argc, const char **argv)
{
    int n = 1024 * 1024;
    float dt = 0.1f;   /* Euler time step */
    float dx = 0.5f;   /* grid spacing */
    DeviceArray1D phi, dphidt, source;
    HostArray1D hphi;  /* NOTE(review): allocated but never used here */
    phi.allocate(n,0);
    dphidt.allocate(n,0);
    source.allocate(n,0);
    hphi.allocate(n,0);
    phi = constant(0);
    source = inrange(identity(), constant(n/2), constant(n/2 + n/4));
    cpu_timer timer;
    timer.start();
    for (int step = 0; step < 100; step++) {
        /* dphidt = laplacian(phi)/dx + source, expressed with shifted reads */
        dphidt = (constant(1/dx) * (phi[-1] - constant(2) * phi[0] + phi[1]) + source[0]);
        phi = phi[0] + constant(dt) * dphidt[0];
    }
    timer.stop();
    printf("Elapsed: %f\n", timer.elapsed_ms());
    return 0;
}
/* Entry point: dispatch to one of the four benchmark drivers based on
 * argv[1] ("cpu", "gpu1", "gpu2" or "meta"). Prints usage and exits with
 * failure when the argument is missing or unrecognized.
 * Fix: the original fell off the end of a non-void function when argv[1]
 * matched none of the options -- undefined behavior / garbage exit code. */
int main(int argc, const char **argv)
{
    if (argc == 1) {
        printf("usage: run [cpu|gpu1|gpu2|meta]\n");
        exit(-1);
    }
    if (strcmp(argv[1], "cpu")==0)
        return main_cpu(argc, argv);
    if (strcmp(argv[1], "gpu1")==0)
        return main_gpu1(argc, argv);
    if (strcmp(argv[1], "gpu2")==0)
        return main_gpu2(argc, argv);
    if (strcmp(argv[1], "meta")==0)
        return main_metaprog(argc, argv);
    /* unknown mode: report and fail instead of falling off the end */
    printf("usage: run [cpu|gpu1|gpu2|meta]\n");
    return 1;
}
|
c67cde274e223c5bf95509abd1a2d3076f0f90cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Filename: main.cu **************************************************************************** /
*
* INPUT:
* -Particulas.in:
* cantParticles
* type x y z Vx Vy Vz q ; where
* dt ; (x,y,z) = posicin respecto de algn (0,0,0)
* temp0 ; (Vx,Vy,Vz) = Velocidades iniciales
* tempi ; q = carga
* tautp ; dt = delta_tiempo
* ; temp0 = temperatura target
* ; tempi = temperatura inicial (No se usa an)
* ; tautp = factor de correccin de velocidades
*
*
*
* -TablaCoeficientesLennard
* type sigma epsilon mass min max ; donde min y max indican de qu valor
* ; a qu valor hay que densificar las muestras
* ; (NO ESTA IMPLEMENTADO AUN)
*
* ALGORITMO:
* 1-Levantar Coeficientes
* 2-Armar matriz de lennard para cant_samples_r muestras
* Para cada tipo de partcula:
* Calcular en funcion de los coeficientes el potencial para cant_samples_r valores r
* 3-Levantar partculas
* Ordenar y armar ndices
* Para cada iteracin de MD:
* 4-Calcular distancias:
* Cada partcula contra todas las otras
* Armar matriz de distancias
* 5-Calcular las derivadas respecto de r para cada par de partculas
* 6-Calcular fuerza para cada particula:
* Cada partcula contra todas las otras: matriz 3D
* Obtener fuerza resultante para cada partcula: vector 3D
* 7-Calcular nuevas posiciones: vector 3D
*
***************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <vector>
#include <algorithm>
#include <cmath>
#include <string>
#include <iomanip>
#include <sys/time.h>
/** **************************************************************** **/
/** ************* DEFAULT GLOBAL VARIABLES VALUES ****************** **/
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 16
#define BLOCK_SIZE (BLOCK_SIZE_X*BLOCK_SIZE_Y)
//#define TEXTURE_MEM_SIZE 5000
#define DIF_FINITAS_DELTA 4
/** Variables fsicas **/
#define CANT_TYPES 37
#define MAx 15
#define MIn 0.3
#define DIST (MAx - MIn)
#define DELTA_TIEMPO 0.001
#define TEMP 100
#define TAO 0.1
#define BOX_MAX 999 // distancia mxima del 0 para cada coordenada
// Determinamos un cubo de volumen = (2*BOX_MAX) ^3
/** Filenames **/
char* lennardTableFileName = "Input_Mache/TablaCoeficientesLennard";
char* particlesFileName = "Input_Mache/particles.in";
char* debugOutputFilename = "Output_Mache/debug.out";
char* outputFilename = "Output_Mache/results.out";
char* crdFilename = "Output_Mache/mdcrd";
char* timeFilename = "Output_Mache/times.out";
using namespace std;
// streamsize ss = cout.precision();
/** **************************************************************** **/
/** ******************** GLOBAL VARIABLES ************************** **/
texture <float, hipTextureType2D,hipReadModeElementType> texRef;
double delta_tiempo = DELTA_TIEMPO;
double temp0 = TEMP;
double tempi;
double tautp = TAO;
double Boltzmann_cte = 0.0019872041;
double box_max_x = BOX_MAX;
double box_max_y = BOX_MAX;
double box_max_z = BOX_MAX;
bool box = true;
double cut = 12;
int cant_steps = 1;
int cant_types = CANT_TYPES;
int TEXTURE_MEM_SIZE=65000;
bool derivative = false;
bool analytic = false;
bool results = false;
bool amberResults = false;
bool coordinates = false;
bool periodicity = false;
/** **************************************************************** **/
/** ************************* DEVICE ******************************* **/
/* Tabulate the 12-6 Lennard-Jones potential between one fixed particle type
 * (epsilon e, sigma s) and every type in EPS/SIG, sampled at `width` evenly
 * spaced distances r = MIn + x*var. Output LJ_POT is a height x width
 * matrix: row y = other type, column x = distance sample. */
__global__
void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG,
                    double e, double s, double var, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x >= width || y >= height) {return;}
    /* Lorentz-Berthelot mixing: arithmetic-mean sigma, geometric-mean epsilon */
    double sig12 = (double) (s + SIG[y])/2;
    double eps12 = (double) sqrt(e * EPS[y]);
    double r = (double) MIn+x*var;   /* sampled distance */
    /* V(r) = 4*eps*((sig/r)^12 - (sig/r)^6) */
    LJ_POT[y*width +x] = (float) 4.0*eps12*( pow((sig12/r),12) - pow((sig12/r),6));
}
/** **************************************************************** **/
/* Tabulate the analytic derivative dV/dr of the 12-6 Lennard-Jones potential
 * for one fixed type (epsilon e, sigma s) against every type in EPS/SIG,
 * sampled at `width` distances r = MIn + x*var. Same layout as
 * lennard_Kernel: row y = other type, column x = distance sample. */
__global__
void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG,
                                double e, double s, double var, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x >= width || y >= height) {return;}
    /* Lorentz-Berthelot mixing: arithmetic-mean sigma, geometric-mean epsilon */
    double sig12 = (double) (s + SIG[y])/2;
    double eps12 = (double) sqrt(e * EPS[y]);
    double r = (double) MIn+x*var;   /* sampled distance */
    /* dV/dr = 24*eps*(sig^6/r^7 - 2*sig^12/r^13) */
    dLJ_POT[y*width +x] = (float) 24.0*eps12*( pow(sig12,6)/ pow(r,7) - 2 * pow(sig12,12)/ pow(r,13));
}
/** **************************************************************** **/
/* Pairwise displacement and distance under the minimum-image convention:
 * each component of (p_i - p_j) is reduced by the nearest multiple of the
 * box length before computing R = |dX|. i = column particle, j = row
 * particle; outputs are width x height matrices in row-major order. */
__global__
void close_distances_kernel(double* X, double* Y, double* Z, double* R,
                            double* position_x, double* position_y, double* position_z,
                            double box_x, double box_y, double box_z, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i >= width || j >= height) {return;}
    unsigned int pos = j*width+i;
    double _X = position_x[i] - position_x[j];
    double _Y = position_y[i] - position_y[j];
    double _Z = position_z[i] - position_z[j];
    /* minimum-image wrap: subtract the nearest whole box length per axis */
    _X = _X - box_x * round((double) _X/box_x);
    _Y = _Y - box_y * round((double) _Y/box_y);
    _Z = _Z - box_z * round((double) _Z/box_z);
    X[pos] = _X;
    Y[pos] = _Y;
    Z[pos] = _Z;
    R[pos] = (double) sqrt( _X*_X + _Y*_Y + _Z*_Z );
}
/** **************************************************************** **/
/* Plain (non-periodic) pairwise displacement and Euclidean distance:
 * element (j, i) holds p_i - p_j per axis and its norm R. */
__global__
void distances_kernel(double* R, double* X, double* Y, double* Z,
                      double* x1, double* y1, double* z1, int width, int height)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    const unsigned int pos = j * width + i;
    const double dx = x1[i] - x1[j];
    const double dy = y1[i] - y1[j];
    const double dz = z1[i] - z1[j];
    X[pos] = dx;
    Y[pos] = dy;
    Z[pos] = dz;
    R[pos] = sqrt(dx * dx + dy * dy + dz * dz);
}
/** **************************************************************** **/
/* Numerical dE/dr for every particle pair via central finite differences on
 * the tabulated LJ potential held in texture memory (texRef). The texture
 * stacks cant_types x cant_types rows of cant_samples_r samples covering
 * r in [MIn, MAx]. Diagonal pairs and pairs beyond `cut` get 0. */
__global__
void derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
                    int cant_samples_r, int cant_types, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
    /* Texture row: submatrix for type(y) then row for type(x); +0.5 centers
     * the texel fetch on the row. */
    float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
    float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
    /* Map r to a sample column:
     *   r = (MAX-MIN) * x / N + MIN  =>  x = (r-MIN) * N / (MAX-MIN) */
    float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
    /* Central difference over +/- DIF_FINITAS_DELTA table samples */
    double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 );
    double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 );
    double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r;
    dEr[y*width+x] = (E_r_up - E_r_dwn) / (r_dif);
}
/** **************************************************************** **/
/* dE/dr for every particle pair by direct lookup: used when the texture
 * already holds the tabulated derivative (see derivatives_lennard_Kernel)
 * rather than the potential. Same layout and cutoff handling as
 * derivative_E_r, but a single tex2D fetch instead of finite differences. */
__global__
void direct_derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
                           int cant_samples_r, int cant_types, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
    /* Texture row: submatrix for type(y) then row for type(x); +0.5 centers
     * the texel fetch on the row. */
    float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
    float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these
    /* Map r to a sample column: x = (r-MIN) * N / (MAX-MIN) */
    float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
    dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 );
}
/** **************************************************************** **/
/* Pairwise potential energy E(r) via texture lookup of the tabulated LJ
 * potential. Same table layout and cutoff handling as derivative_E_r;
 * diagonal pairs and pairs beyond `cut` contribute 0. */
__global__
void E_r(double* Er, double* r, double cut, int* item_to_type,
         int cant_samples_r, int cant_types, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
    /* Texture row: submatrix for type(y), then row for type(x), +0.5 to
     * center the texel fetch */
    float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use
    float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these
    float row = t_o_p_2 + 0.5 + (t_o_p_1* cant_types);
    /* Map r to a sample column: x = (r-MIN) * N / (MAX-MIN) */
    float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
    Er[y*width+x] = (double) tex2D( texRef, index_x, row );
}
/* ***************************************************************** **/
/** +ANALYTIC */
/** **************************************************************** **/
/* Closed-form dE/dr for every particle pair, computed directly from the
 * epsilon/sigma tables instead of the texture lookup path. Diagonal pairs
 * and pairs beyond `cut` get 0. */
__global__
void derivative_E_r_analytic(double* dEr, double* r, double cut, int* item_to_type, int cant_samples_r,
                             double* EPS, double* SIG, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
    /* Lorentz-Berthelot mixing for the two particle types */
    int type_i = item_to_type[x];
    int type_j = item_to_type[y];
    double sig12 = (double) (SIG[type_i] + SIG[type_j])/2;
    double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
    /* dV/dr = 24*eps*(sig^6/r^7 - 2*sig^12/r^13) */
    dEr[y*width+x] = (double) 24.0*eps12*( pow(sig12,6)/ pow(r[y*width+x],7) - 2 * pow(sig12,12)/ pow(r[y*width+x],13));
}
/* Closed-form 12-6 Lennard-Jones energy for every particle pair, computed
 * directly from the epsilon/sigma tables (no texture lookup). Diagonal
 * pairs and pairs beyond `cut` get 0. */
__global__
void E_r_analytic(double* Er, double* r, double cut, int* item_to_type, int cant_samples_r,
                  double* EPS, double* SIG, int width, int height)
{
    /* Matrix element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
    /* Lorentz-Berthelot mixing for the two particle types */
    int type_i = item_to_type[x];
    int type_j = item_to_type[y];
    double sig12 = (double) (SIG[type_i] + SIG[type_j])/2;
    double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]);
    /* V(r) = 4*eps*((sig/r)^12 - (sig/r)^6) */
    Er[y*width+x] = (double) 4.0*eps12*( pow((sig12/r[y*width+x]),12) - pow((sig12/r[y*width+x]),6));
}
/** **************************************************************** **/
/** -ANALYTIC */
/* ***************************************************************** **/
/** **************************************************************** **/
/* Fx = dE(r) / dr * (x1-x2) / r */
/* Per-pair force component along one axis: F = dE/dr * dif / r, where dif is
 * the displacement component for that axis. The diagonal is zeroed without
 * touching r (whose diagonal is 0, avoiding a division by zero). */
__global__
void Parcial_Forces_Kernel(double* force, double* dEr, double* dif, double* r, int width, int height)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;
    const unsigned int pos = j * width + i;
    force[pos] = (i == j) ? 0.0 : dEr[pos] * dif[pos] / r[pos];
}
/** **************************************************************** **/
/* Reduce one row of the pairwise force matrix into the net force on each
 * particle: result[x] = sum_i forces[x*cant + i]. One thread per particle;
 * the row sum is serial, O(cant) work per thread. */
__global__
void Resultant_Forces_Kernel(double* result, double* forces, int cant)
{
    /* Vector element handled by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    if(x >= cant) {return;}
    int i = 0;
    double tmp = 0;   /* local accumulator avoids repeated global writes */
    int row = x*cant;
    for(; i < cant; i++){
        tmp += forces[row + i];
    }
    result[x] = tmp;
}
/** **************************************************************** **/
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
/* Leapfrog half-step velocity update: V(t + Dt/2) = V(t - Dt/2) + F(t)*dtx/m,
 * with the particle's mass looked up by type.
 * NOTE(review): dtx = dt * 20.455 -- presumably the AMBER/AKMA time-unit
 * conversion factor; confirm against the integrator's unit convention. */
__global__
void Resultant_Velocities_Kernel(double* velocity, double* old_velocity, double* force, double* m,
                                 int* item_to_type, double delta_tiempo, int cant_particles)
{
    /* Vector element handled by this thread */
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= cant_particles) {return;}
    double Vt = old_velocity[i];
    int type = item_to_type[i];    /* index into the per-type mass table */
    double dtx = delta_tiempo*20.455;
    /* Result */
    velocity[i] = Vt + ( (force[i]*dtx) / m[type] );
}
/** **************************************************************** **/
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/* Leapfrog position update: P(t + Dt) = P(t) + V(t + Dt/2) * dtx, where
 * dtx = delta_tiempo * 20.455 (same time scaling as the velocity step). */
__global__
void Resultant_Positions_Kernel(double* positions, double* velocity, double delta_tiempo, int cant)
{
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= cant)
        return;
    const double dtx = delta_tiempo * 20.455;
    positions[idx] += velocity[idx] * dtx;
}
/** **************************************************************** **/
/* -BOX_MAX 0 BOX_MAX */
/* |-----------------|-----------------| */
/* Wrap a coordinate back into the periodic box [-box_max, box_max].
 * Works on pos = position - box_max, i.e. the box shifted to [-2*box_max, 0]:
 * values above 0 (past the right edge) or below -2*box_max (past the left
 * edge) are folded back with fmod over the full box length 2*box_max. */
__global__
void Adjustin_Positions_Kernel(double* position, double box_max, int cant)
{
    /* Vector element handled by this thread */
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= cant) {return;}
    double pos = position[i] - box_max;
    if(pos > 0){
        /* drifted past +box_max: re-enter from the low side */
        position[i] = -box_max + fmod(pos, (double) (2*box_max));
    }
    if(pos < -2*box_max){
        /* drifted past -box_max: re-enter from the high side */
        position[i] = box_max + fmod(pos, (double) (2*box_max));
    }
}
/** **************************************************************** **/
/* Ek = |v|^2 * m / 2 */
/* Ek_x = (v_x)^2 * m / 2 */
/* Per-particle, per-axis kinetic energy from the average of the previous and
 * current half-step velocities:
 *   kE = ((vold + v)/2)^2 * m / 2 = (vold + v)^2 * m / 8,
 * with the particle's mass looked up by type. */
__global__
void Kinetic_Energy_Kernel(double* kE, double* vold, double* v, double* m, int* item_to_type, int cant)
{
    /* Vector element handled by this thread */
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i>= cant) {return;}
    double vi = vold[i] + v[i];   /* 2x the midpoint velocity */
    int type = item_to_type[i];
    kE[i] = vi * vi * m[type] / 8;
}
/** **************************************************************** **/
/* Sum the per-axis kinetic-energy contributions into one per-particle
 * total: kE[i] = Ke_x[i] + Ke_y[i] + Ke_z[i]. */
__global__
void Total_Kinetic_Energy_Kernel(double* kE, double* Ke_x, double* Ke_y, double* Ke_z, int cant)
{
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= cant)
        return;
    kE[idx] = Ke_x[idx] + Ke_y[idx] + Ke_z[idx];
}
/** **************************************************************** **/
/* Thermostat rescale: store the lambda-scaled current velocities as the
 * "old" velocities for the next step: vold[i] = v[i] * lambda. */
__global__
void Corrected_Velocities_Kernel(double* vold, double* v, double lambda, int cant){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= cant)
        return;
    vold[idx] = lambda * v[idx];
}
/** **************************************************************** **/
/** *************************** HOST ******************************* **/
int main( int argc, char* argv[] )
{
for(uint i = 0; i < argc; i++){
if(strcmp(argv[i], "-t") == 0){
/* outputTimeFilename */
timeFilename = argv[i+1];
}
if(strcmp(argv[i], "-a") == 0){
/* ANALYTIC mode */
analytic = true;
}
if(strcmp(argv[i], "-d") == 0){
/* DERIVATIVE mode */
derivative = true;
}
if(strcmp(argv[i], "-r") == 0){
/* RESULTS or TIMER mode */
results = true;
amberResults = true;
}
if(strcmp(argv[i], "-ar") == 0){
/* RESULTS */
amberResults = true;
}
if(strcmp(argv[i], "-c") == 0){
/* PRINT mdcrd file */
coordinates = true;
}
if(strcmp(argv[i], "-p") == 0){
/* Periodicity */
periodicity = true;
}
if(strcmp(argv[i], "-tex") == 0){
/* Periodicity */
TEXTURE_MEM_SIZE=atoi(argv[i+1]);
}
}
if (derivative)
cout << "Derivative" << endl;
if (analytic)
cout << "Analytic" << endl;
if(results){
cout << "DEBUG mode ON" << endl;
}
if(amberResults){
cout << "AMBER results ON" << endl;
}
fstream out;
fstream crd;
//if(results or amberResults){
/* Output file */
out.open(outputFilename,fstream::out);
streamsize ss = out.precision();
out << setprecision(20);
//}
if(coordinates){
/* CRD output file */
crd.open(crdFilename,fstream::out);
crd << setprecision(3);
crd.setf( std::ios::fixed, std:: ios::floatfield );
crd << " POS(x) POS(y) POS(z)" << endl;
}
struct timeval tv1, tv2;
fstream taim;
if(!results){ //timer mode ON
/* Time output file */
taim.open(timeFilename, fstream::app | fstream::out);
taim << setprecision(20);
}
/* Levantamos Coeficientes de Lennard */
ifstream table (lennardTableFileName);
table >> cant_types;
/**Variables y memoria*/
size_t cant_types_size = cant_types * sizeof(double);
vector<string> h_type;
h_type.resize(cant_types);
double* h_sigma = (double*) ( malloc(cant_types_size));
double* h_epsilon = (double*) ( malloc(cant_types_size));
double* h_mass = (double*) ( malloc(cant_types_size));
/**Levantamos datos*/
for(int j = 0; j<cant_types ; j++){
table >> h_type[j];
table >> h_sigma[j];
table >> h_epsilon[j];
table >> h_mass[j];
}
table.close();
/* Armamos matrices de lennard */
/**Variables y memoria**/
int cant_samples_r = TEXTURE_MEM_SIZE/(sizeof(float)); // cant of original sample values (mximo permitido por mem de textura)
double var = DIST / ((double) cant_samples_r); // variation of r
size_t cant_samples_r_size = cant_samples_r * sizeof(float);
float* h_dLJPot;
float* h_LJPot;
if(derivative)
h_dLJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
else
h_LJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
int width = cant_samples_r;
int height = cant_types;
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid( (int) ceil((double)width / (double)dimBlock.x), (int) ceil((double)height / (double)dimBlock.y) );
double* d_EPS;
double* d_SIG;
float* d_LJPot;
float* d_dLJPot;
hipMalloc(&d_EPS, cant_types_size);
hipMalloc(&d_SIG, cant_types_size);
hipMemcpy(d_EPS, h_epsilon, cant_types_size, hipMemcpyHostToDevice);
hipMemcpy(d_SIG, h_sigma, cant_types_size, hipMemcpyHostToDevice);
if(derivative)
hipMalloc(&d_dLJPot, cant_samples_r_size * cant_types);
else
hipMalloc(&d_LJPot, cant_samples_r_size * cant_types);
/** Rellenamos datos con CUDA **/
if(derivative) {
for(int a = 0; a<cant_types; a++){
hipLaunchKernelGGL(( derivatives_lennard_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dLJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
hipMemcpy( (float*) &(h_dLJPot[(a*cant_samples_r*cant_types)]), d_dLJPot, cant_types * cant_samples_r_size, hipMemcpyDeviceToHost);
}
} else {
for(int a = 0; a<cant_types; a++){
hipLaunchKernelGGL(( lennard_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_LJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
hipMemcpy( (float*) &(h_LJPot[(a*cant_samples_r*cant_types)]), d_LJPot, cant_types * cant_samples_r_size, hipMemcpyDeviceToHost);
}
}
/** Liberamos memoria de CUDA **/
hipFree(&d_EPS);
hipFree(&d_SIG);
hipFree(&d_LJPot);
if(results){
/** DEBUG **/
if(derivative)
out << " derivative LENNARD " << endl;
else
out << " LENNARD " << endl;
for(int a = 0; a<cant_types; a++){
out << " Type = " << h_type[a] << endl << " ";
for(int i = 0; i<cant_types; i++){
for(int j = 0; j<cant_samples_r; j+= cant_samples_r/8){
if(derivative)
out << h_dLJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
else
out << h_LJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
}
out << endl << " ";
}
out << "***********************************************************************************" << endl;
}
/** DEBUG **/
}
/*Levantamos partculas*/
fstream particles;
particles.open(particlesFileName);
/** Variables y memoria **/
uint cant_particles;
double* h_position_x;
double* h_position_y;
double* h_position_z;
double* h_velocity_x;
double* h_velocity_y;
double* h_velocity_z;
double* h_velocity_old_x;
double* h_velocity_old_y;
double* h_velocity_old_z;
double* h_chargue;
double h_box_x;
double h_box_y;
double h_box_z;
double h_box_alpha;
double h_box_beta;
double h_box_gamma;
vector<string> h_particle_type;
particles >> cant_particles;
size_t cant_particles_size = cant_particles * sizeof(double);
h_position_x = (double*)malloc(cant_particles_size);
h_position_y = (double*)malloc(cant_particles_size);
h_position_z = (double*)malloc(cant_particles_size);
h_velocity_x = (double*)malloc(cant_particles_size);
h_velocity_y = (double*)malloc(cant_particles_size);
h_velocity_z = (double*)malloc(cant_particles_size);
h_velocity_old_x = (double*)malloc(cant_particles_size);
h_velocity_old_y = (double*)malloc(cant_particles_size);
h_velocity_old_z = (double*)malloc(cant_particles_size);
h_chargue = (double*)malloc(cant_particles_size);
h_particle_type.resize(cant_particles);
/** Guardamos datos **/
for(uint i = 0; i < cant_particles ; i++) {
particles >> h_particle_type[i];
particles >> h_position_x[i];
particles >> h_position_y[i];
particles >> h_position_z[i];
particles >> h_velocity_old_x[i];
particles >> h_velocity_old_y[i];
particles >> h_velocity_old_z[i];
particles >> h_chargue[i];
}
/** Perioricidad **/
//TODO: por ahora usamos cubo,
//situamos el cero en el centro del mismo
//Recibimos en orden x, y, z
particles >> box;
if(box){
cout << " Levantamos caja" << endl;
particles >> h_box_x;
particles >> h_box_y;
particles >> h_box_z;
particles >> h_box_alpha;
particles >> h_box_beta;
particles >> h_box_gamma;
if( h_box_alpha != 90 or h_box_beta != 90 or h_box_gamma != 90){
cout << " Se forzaron los angulos para que sea un CUBO: " << endl;
}
box_max_x = h_box_x/2;
box_max_y = h_box_y/2;
box_max_z = h_box_z/2;
}
/** Parametros **/
particles >> cant_steps;
particles >> delta_tiempo;
particles >> temp0;
particles >> tempi;
particles >> tautp;
particles >> cut;
particles.close();
// if(results){
// /** DEBUG **/
// out << " INITIAL VALUES" << endl;
// for(int i = 0; i<cant_particles; i++){
// out << " Type: " << h_particle_type[i] << " | Pos: (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")";
// out << " | Vel: (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
// }
// out << endl;
//
// /** DEBUG **/
// }
if(results){
// /** DEBUG **/
// out << " CANT of TYPES" << endl;
// for(int i = 0; i < h_type.size(); i++){
// out << " " << h_type[i] << " " << cant_of_typ[i] << endl;
// }
// out << endl;
/** DEBUG **/
}
/* Armamos estructura de items para saber de qu tipo
/* es la partcula en la que estamos en CUDA */
/** h_particle_type = H H H H H K K K K K O O O O O O O O O ... **/
/** h_item_particle = 1 1 1 1 1 3 3 3 3 3 9 9 9 9 9 9 9 9 9 ... **/
int * h_item_particle = (int*)malloc(cant_particles * sizeof(int));
int * d_item_particle;
hipMalloc(&d_item_particle, cant_particles * sizeof(int));
/** Convertimos anotamos type de la partcula como un int que sera el index dentro de h_type **/
for(int i = 0; i< cant_particles; i++){
for(int j = 0; j< h_type.size(); j++){
if(h_type[j] == h_particle_type[i]){
h_item_particle[i] = j;
break;
}
}
}
hipMemcpy(d_item_particle, h_item_particle, cant_particles * sizeof(int), hipMemcpyHostToDevice);
// if(results){
// /** DEBUG **/
// out << " ITEM to TYPE" << endl;
// for(int i = 0; i < cant_particles; i++){
// out << " Particle[" << i << "] | Type: " << h_type[h_item_particle[i]] << " (index :" << h_item_particle[i] << ") " << endl;
// }
// out << endl;
// /** DEBUG **/
// }
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL DISPOSITIVO GPU */
/* ************************************************ */
/** Variables **/
size_t s_size = cant_particles_size * cant_particles;
/** Positions **/
double* d_position_x;
double* d_position_y;
double* d_position_z;
hipMalloc(&d_position_x, cant_particles_size);
hipMalloc(&d_position_y, cant_particles_size);
hipMalloc(&d_position_z, cant_particles_size);
hipMemcpy(d_position_x, h_position_x, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_position_y, h_position_y, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_position_z, h_position_z, cant_particles_size, hipMemcpyHostToDevice);
/** Positions **/
double* d_pos_close_x;
double* d_pos_close_y;
double* d_pos_close_z;
hipMalloc(&d_pos_close_x, cant_particles_size);
hipMalloc(&d_pos_close_y, cant_particles_size);
hipMalloc(&d_pos_close_z, cant_particles_size);
/** Particle's mass **/
double* d_mass;
hipMalloc(&d_mass, cant_types_size);
hipMemcpy(d_mass, h_mass, cant_types_size, hipMemcpyHostToDevice);
/** Velocities **/
double* d_velocity_x;
double* d_velocity_y;
double* d_velocity_z;
double* d_velocity_old_x;
double* d_velocity_old_y;
double* d_velocity_old_z;
hipMalloc(&d_velocity_x, cant_particles_size);
hipMalloc(&d_velocity_y, cant_particles_size);
hipMalloc(&d_velocity_z, cant_particles_size);
hipMalloc(&d_velocity_old_x, cant_particles_size);
hipMalloc(&d_velocity_old_y, cant_particles_size);
hipMalloc(&d_velocity_old_z, cant_particles_size);
hipMemcpy(d_velocity_old_x, h_velocity_old_x, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_velocity_old_y, h_velocity_old_y, cant_particles_size, hipMemcpyHostToDevice);
hipMemcpy(d_velocity_old_z, h_velocity_old_z, cant_particles_size, hipMemcpyHostToDevice);
/** Distances **/
double* d_distance_x;
double* d_distance_y;
double* d_distance_z;
double* d_distance_r;
hipMalloc(&d_distance_x, s_size);
hipMalloc(&d_distance_y, s_size);
hipMalloc(&d_distance_z, s_size);
hipMalloc(&d_distance_r, s_size);
/** Derivatives **/
double* d_dEr;
hipMalloc(&d_dEr, s_size);
/** VDWAALS **/
double* d_Er;
hipMalloc(&d_Er, s_size);
/** Forces **/
double* d_Force_x;
double* d_Force_y;
double* d_Force_z;
hipMalloc(&d_Force_x, s_size);
hipMalloc(&d_Force_y, s_size);
hipMalloc(&d_Force_z, s_size);
double* d_Force_x_resultant;
double* d_Force_y_resultant;
double* d_Force_z_resultant;
hipMalloc(&d_Force_x_resultant, cant_particles_size);
hipMalloc(&d_Force_y_resultant, cant_particles_size);
hipMalloc(&d_Force_z_resultant, cant_particles_size);
/** Kinetic Energy **/
double* d_kinetic_energy;
double* d_kinetic_energy_x;
double* d_kinetic_energy_y;
double* d_kinetic_energy_z;
hipMalloc(&d_kinetic_energy, cant_particles_size);
hipMalloc(&d_kinetic_energy_x, cant_particles_size);
hipMalloc(&d_kinetic_energy_y, cant_particles_size);
hipMalloc(&d_kinetic_energy_z, cant_particles_size);
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL HOST */
/* ************************************************ */
/** Distances **/
double (*h_distance_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_r)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
/** Forces **/
double (*h_Force_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double* h_Force_x_resultant = (double*)malloc(cant_particles_size);
double* h_Force_y_resultant = (double*)malloc(cant_particles_size);
double* h_Force_z_resultant = (double*)malloc(cant_particles_size);
/** Kinetic Energy **/
double* h_kinetic_energy = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_x = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_y = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_z = (double*)malloc(cant_particles_size);
/* ************************************************ */
/* Calculamos ENERGIA CINETICA deseada */
/* ************************************************ */
/* Ek = Kb * T (3N - Nc) / 2 */
double Nc = 5;
double factor_conv_T_Ek = 2 / (Boltzmann_cte * (3 *cant_particles - Nc) );
if(amberResults){
double kinetic_Energy = Boltzmann_cte * temp0 * (3*cant_particles - Nc) / 2;
/** DEBUG **/
out << " THEORETICAL VALUES:" << endl << endl;
out << " * Kb = " << Boltzmann_cte << endl << endl;
out << " * Temperature = " << temp0 << endl << endl;
out << " * Kinetic Energy = " << kinetic_Energy << endl << endl;
out << " * Factor_conv_T_Ek = " << factor_conv_T_Ek << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Seteamos la memoria de TEXTURA */
/* ************************************************ */
hipArray* cuLennard_i;
// if(!analytic){
/** Usamos texturas **/
hipChannelFormatDesc channelDesc = hipCreateChannelDesc( 32, 0, 0, 0, hipChannelFormatKindFloat );
hipMallocArray(&cuLennard_i, &channelDesc, cant_samples_r, cant_types*cant_types); //width x height
texRef.addressMode[0] = hipAddressModeClamp;
texRef.filterMode = hipFilterModeLinear; //hipFilterModePoint; // //Tipo de interpolacin
if(derivative) {
hipMemcpyToArray(cuLennard_i, 0, 0, h_dLJPot, cant_types * cant_types * cant_samples_r_size, hipMemcpyHostToDevice);
} else {
hipMemcpyToArray(cuLennard_i, 0, 0, h_LJPot, cant_types * cant_types * cant_samples_r_size, hipMemcpyHostToDevice);
}
/** Bindeamos la textura **/
hipBindTextureToArray(texRef, cuLennard_i, channelDesc);
// }
if(amberResults){
out << endl << " ESTARTIN DE PROGRAM" << endl;
out << " Amaunt of itereishons = " << cant_steps << endl << endl;
}
for(int i=0 ; i<10000 ; i++){
for(int j=0 ; j<1000 ; j++){
}
}
/** Esperamos a que termine de bindear la textura **/
hipDeviceSynchronize();
if(!results){ //timer mode ON
/** Arrancamos medicion del tiempo **/
gettimeofday(&tv1, NULL);
}
for(int step = 0; step < cant_steps; step++){
/* ********************************************************************************************************** */
/* ****************************************** INICIO Iteracion DM ******************************************* */
/* ********************************************************************************************************** */
if(amberResults){
out << "/* ************************************************************************************************ */" << endl;
out << "/* ************************************* INICIO Iteracion " << step << " ************************************ */" << endl;
out << "/* ************************************************************************************************ */" << endl;
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ************************************************ */
/* Calculamos Matriz de Distancias entre partculas */
/* ************************************************ */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
if(!periodicity){
hipLaunchKernelGGL(( distances_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_distance_r, d_distance_x, d_distance_y, d_distance_z,
d_position_x, d_position_y, d_position_z, width, height);
} else {
/**Rellenamos datos**/
hipLaunchKernelGGL(( close_distances_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_distance_x, d_distance_y, d_distance_z, d_distance_r,
d_position_x, d_position_y, d_position_z,
h_box_x, h_box_y, h_box_z, width, height);
}
if(results){
/** DEBUG **/
hipMemcpy(h_distance_r, d_distance_r, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_distance_x, d_distance_x, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_distance_y, d_distance_y, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_distance_z, d_distance_z, s_size, hipMemcpyDeviceToHost);
out << " DISTANCES" << endl << " ";
double (*matriz)[cant_particles] = (double (*)[cant_particles]) h_distance_r;
for(int i = 0; i<cant_particles; i+= cant_particles/8){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles/8){
out << matriz[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Derivadas */
/* ************************************************ */
/** Variables y memoria **/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if(analytic){
hipLaunchKernelGGL(( derivative_E_r_analytic), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
hipLaunchKernelGGL(( E_r_analytic), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
// }
} else {
// /** Calculo de la derivada dE(r)/dr usando diferencias finitas **/
if(derivative){
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
hipLaunchKernelGGL(( direct_derivative_E_r), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
} else {
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
hipLaunchKernelGGL(( derivative_E_r), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
}
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
hipLaunchKernelGGL(( E_r), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
}
// }
//
// }
/*
if(!derivative){
out << " Lennard-Jones" << endl << " ";
double vdwaals = 0;
double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
hipMemcpy(h_Er, d_Er, s_size, hipMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_Er[i][j] << "\t";
if(i<=j)
vdwaals += h_Er[i][j];
}
out << endl << " ";
}
//out << endl;
out << " VDWAALS = " << vdwaals << endl << endl;
//taim << TEXTURE_MEM_SIZE << " " << vdwaals << endl;
free(h_Er);
}
*/
// if(amberResults){
//if(!derivative){
/** DEBUG **/
//out << " Lennard-Jones" << endl << " ";
double vdwaals = 0;
double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
hipMemcpy(h_Er, d_Er, s_size, hipMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i++){
//out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
//out << h_Er[i][j] << "\t";
//if(i<=j)
vdwaals += h_Er[i][j];
}
//out << endl << " ";
}
out << endl;
out << " VDWAALS = " << vdwaals << endl << endl;
free(h_Er);
/** DEBUG **/
//}
//}
if(results){
/** DEBUG **/
out << " DERIVATIVES" << endl << " ";
double (*h_dEr)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
hipMemcpy(h_dEr, d_dEr, s_size, hipMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i+= cant_particles/8){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles/8){
out << h_dEr[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
free(h_dEr);
/** DEBUG **/
}
if(results){
/** DEBUG **/
hipMemcpy(h_velocity_old_x, d_velocity_old_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_old_y, d_velocity_old_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_old_z, d_velocity_old_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " OLD VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos FUERZAS resultantes */
/* ************************************************ */
/* Fx = dE(r) / dr * (x1-x2) / r *
* Fy = dE(r) / dr * (y1-y2) / r *
* Fz = dE(r) / dr * (z1-z2) / r */
/* Calculo de vectores parciales */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
/** Calculo del vector F **/
hipLaunchKernelGGL(( Parcial_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_x, d_dEr, d_distance_x, d_distance_r, width, height);
hipLaunchKernelGGL(( Parcial_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_y, d_dEr, d_distance_y, d_distance_r, width, height);
hipLaunchKernelGGL(( Parcial_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_z, d_dEr, d_distance_z, d_distance_r, width, height);
//if(results){
/** DEBUG **/
//**************************************************
//*********IMPRIMO LAS FUERZAS*********************
//*************************************************
/*
hipMemcpy(h_Force_x, d_Force_x, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_y, d_Force_y, s_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_z, d_Force_z, s_size, hipMemcpyDeviceToHost);
out << " FORCES" << endl << " ";
for(int i = 0; i<cant_particles; i++){
for(int j = 0; j<cant_particles; j++){
out << "(" << h_Force_x[i][j] << " , " << h_Force_y[i][j] << " , " << h_Force_z[i][j] << ")\t";
if (j==0 && i==1)
taim << TEXTURE_MEM_SIZE << " " << h_Force_x[i][j] << endl;
}
out << endl << " ";
}
out << endl;
//}
*/
/* Calculo del vector F */
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
hipLaunchKernelGGL(( Resultant_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_x_resultant, d_Force_x, cant_particles);
hipLaunchKernelGGL(( Resultant_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_y_resultant, d_Force_y, cant_particles);
hipLaunchKernelGGL(( Resultant_Forces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Force_z_resultant, d_Force_z, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_Force_x_resultant, d_Force_x_resultant, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_y_resultant, d_Force_y_resultant, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_Force_z_resultant, d_Force_z_resultant, cant_particles_size, hipMemcpyDeviceToHost);
out << " RESULTANT FORCES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_Force_x_resultant[i] << " , " << h_Force_y_resultant[i] << " , " << h_Force_z_resultant[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos VELOCIDADES Resultantes */
/* ************************************************ */
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Piso las velocidades acumuladas al tiempo t con las nuevas de t+Dt */
hipLaunchKernelGGL(( Resultant_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_x, d_velocity_old_x, d_Force_x_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_y, d_velocity_old_y, d_Force_y_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Velocities_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velocity_z, d_velocity_old_z, d_Force_z_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_velocity_x, d_velocity_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_y, d_velocity_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_z, d_velocity_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos POSICIONES Resultantes */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/* (TODO: ajustar condiciones de perioricidad */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
hipLaunchKernelGGL(( Resultant_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_x, d_velocity_x, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_y, d_velocity_y, delta_tiempo, cant_particles);
hipLaunchKernelGGL(( Resultant_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_z, d_velocity_z, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_position_x, d_position_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_y, d_position_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_z, d_position_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " RESULTANT POSITIONS" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
if(periodicity){
/* ************************************************ */
/* Calculamos POSICIONES con PERIORICIDAD */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
hipLaunchKernelGGL(( Adjustin_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_x, box_max_x, cant_particles);
hipLaunchKernelGGL(( Adjustin_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_y, box_max_y, cant_particles);
hipLaunchKernelGGL(( Adjustin_Positions_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_position_z, box_max_z, cant_particles);
}
if(coordinates){
/** DEBUG **/
hipMemcpy(h_position_x, d_position_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_y, d_position_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_position_z, d_position_z, cant_particles_size, hipMemcpyDeviceToHost);
if(results){
out << " RESULTANT POSITIONS in the CUBE" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
}
for(int i = 0; i<cant_particles; i+=2){
crd << " " << h_position_x[i] << " " << h_position_y[i] << " " << h_position_z[i];
if(i+1 < cant_particles){
crd << " " << h_position_x[i+1] << " " << h_position_y[i+1] << " " << h_position_z[i+1] << endl;
} else
crd << endl;
}
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek de cada partcula */
/* ************************************************ */
/* Ek = |vp|^2 * m / 2 con vp = (vold+v)/2 */
/* Ek_x = (v_x)^2 * m / 2 */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Calculamos la energía cinética para las tres coordenadas de cada partícula **/
/** Puede hacerse directamente así, sin calcular módulo, por propiedades algebraicas **/
hipLaunchKernelGGL(( Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy_x, d_velocity_old_x, d_velocity_x, d_mass, d_item_particle, cant_particles);
hipLaunchKernelGGL(( Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy_y, d_velocity_old_y, d_velocity_y, d_mass, d_item_particle, cant_particles);
hipLaunchKernelGGL(( Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy_z, d_velocity_old_z, d_velocity_z, d_mass, d_item_particle, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_kinetic_energy_x, d_kinetic_energy_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_kinetic_energy_y, d_kinetic_energy_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_kinetic_energy_z, d_kinetic_energy_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << i+1 << ": (" << h_kinetic_energy_x[i] << " , " << h_kinetic_energy_y[i] << " , " << h_kinetic_energy_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek Resultante */
/* ************************************************ */
/* Ek_TOT = sum (Ek_i) */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Calculamos la Energa cintica total de cada partcula **/
hipLaunchKernelGGL(( Total_Kinetic_Energy_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kinetic_energy, d_kinetic_energy_x, d_kinetic_energy_y, d_kinetic_energy_z, cant_particles);
/* */
/** Calculamos la Energa cintica total del sistema **/
hipMemcpy(h_kinetic_energy, d_kinetic_energy, cant_particles_size, hipMemcpyDeviceToHost);
double Ek_TOT = 0;
for(int i = 0; i<cant_particles; i++){
Ek_TOT += h_kinetic_energy[i];
}
if(results){
/** DEBUG **/
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << " " << h_kinetic_energy[i] << endl;
}
out << endl;
/** DEBUG **/
}
//if(amberResults){
out << " Total Kinetic Energy(t) = " << Ek_TOT << endl << endl;
// }
/* ************************************************ */
/* Calculamos Temperatura Resultante */
/* ************************************************ */
/* T(t) = 2*Ek_TOT / (Kb*(3N-Nc)) */
double Temp_TOT = Ek_TOT * factor_conv_T_Ek;
//if(amberResults){
/** DEBUG **/
out << " Temp(t) = " << Temp_TOT << endl << endl;
/** DEBUG **/
//}
/* *********************************************** */
/* Calculamos Factor de Correccion */
/* *********************************************** */
/* lambda = sqrt( 1 + 2 * dt / tautp * (T/T(t) -1) ) */
double lambda = sqrt( 1 + delta_tiempo / tautp * (temp0/Temp_TOT -1) );
if(amberResults){
/** DEBUG **/
out << " lambda(t) = " << lambda << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Velocidades Corregidas */
/* ************************************************ */
/* vi = lambda * vi */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Piso las velocidades acumuladas al tiempo t+Dt con las nuevas de t+Dt corregidas */
//Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_x, d_velocity_x, lambda, cant_particles);
//Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_y, d_velocity_y, lambda, cant_particles);
//Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_z, d_velocity_z, lambda, cant_particles);
if(results){
/** DEBUG **/
hipMemcpy(h_velocity_x, d_velocity_old_x, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_y, d_velocity_old_y, cant_particles_size, hipMemcpyDeviceToHost);
hipMemcpy(h_velocity_z, d_velocity_old_z, cant_particles_size, hipMemcpyDeviceToHost);
out << " CORRECTED RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ********************************************************************************************************** */
/* ******************************************* FIN Iteracion DM ********************************************* */
/* ********************************************************************************************************** */
}
if(!results){ //timer mode ON
gettimeofday(&tv2, NULL);
taim << cant_steps << " " << (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec) << endl;
}
// if(!analytic){
/** Unbindeamos Textura y liberamos memoria **/
hipUnbindTexture(texRef);
hipFreeArray(cuLennard_i);
// }
// if(results or amberResults){
out.close();
// }
if(coordinates){
crd.close();
}
/* ************************************************ */
/* Liberamos memoria en Dispositivo */
/* ************************************************ */
hipFree(&d_item_particle);
/** Positions **/
hipFree(&d_position_x);
hipFree(&d_position_y);
hipFree(&d_position_z);
/** Distances **/
hipFree(&d_distance_x);
hipFree(&d_distance_y);
hipFree(&d_distance_z);
hipFree(&d_distance_r);
/** Particle's mass **/
hipFree(d_mass);
/** Velocities **/
hipFree(d_velocity_x);
hipFree(d_velocity_y);
hipFree(d_velocity_z);
/** Derivatives **/
hipFree(&d_dEr);
/** Forces **/
hipFree(&d_Force_x);
hipFree(&d_Force_y);
hipFree(&d_Force_z);
hipFree(d_Force_x_resultant);
hipFree(d_Force_y_resultant);
hipFree(d_Force_z_resultant);
/** Kinetic Energy **/
hipFree(d_kinetic_energy);
hipFree(d_kinetic_energy_x);
hipFree(d_kinetic_energy_y);
hipFree(d_kinetic_energy_z);
/* ************************************************ */
/* Liberamos memoria en Host */
/* ************************************************ */
free(h_sigma);
free(h_epsilon);
free(h_mass);
/** Matriz de Lennard Jones **/
if(derivative)
free(h_dLJPot);
else
free(h_LJPot);
free(h_item_particle);
/** Positions **/
free(h_position_x);
free(h_position_y);
free(h_position_z);
/** Distances **/
free(h_distance_x);
free(h_distance_y);
free(h_distance_z);
free(h_distance_r);
/** Velocities **/
free(h_velocity_x);
free(h_velocity_y);
free(h_velocity_z);
/** Chargue **/
free(h_chargue);
/** Forces **/
free(h_Force_x);
free(h_Force_y);
free(h_Force_z);
free(h_Force_x_resultant);
free(h_Force_y_resultant);
free(h_Force_z_resultant);
/** Kinetic Energy **/
free(h_kinetic_energy);
free(h_kinetic_energy_x);
free(h_kinetic_energy_y);
free(h_kinetic_energy_z);
return 0;
}
/* Filename: main.cu **************************************************************************** /
*
* INPUT:
* -Particulas.in:
* cantParticles
* type x y z Vx Vy Vz q ; where
* dt ; (x,y,z) = posición respecto de algún (0,0,0)
* temp0 ; (Vx,Vy,Vz) = Velocidades iniciales
* tempi ; q = carga
* tautp ; dt = delta_tiempo
* ; temp0 = temperatura target
* ; tempi = temperatura inicial (No se usa aún)
* ; tautp = factor de corrección de velocidades
*
*
*
* -TablaCoeficientesLennard
* type sigma epsilon mass min max ; donde min y max indican de qué valor
* ; a qué valor hay que densificar las muestras
* ; (NO ESTA IMPLEMENTADO AUN)
*
* ALGORITMO:
* 1-Levantar Coeficientes
* 2-Armar matriz de lennard para cant_samples_r muestras
* Para cada tipo de partícula:
* Calcular en funcion de los coeficientes el potencial para cant_samples_r valores r
* 3-Levantar partículas
* Ordenar y armar índices
* Para cada iteración de MD:
* 4-Calcular distancias:
* Cada partícula contra todas las otras
* Armar matriz de distancias
* 5-Calcular las derivadas respecto de r para cada par de partículas
* 6-Calcular fuerza para cada particula:
* Cada partícula contra todas las otras: matriz 3D
* Obtener fuerza resultante para cada partícula: vector 3D
* 7-Calcular nuevas posiciones: vector 3D
*
***************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <vector>
#include <algorithm>
#include <cmath>
#include <string>
#include <iomanip>
#include <sys/time.h>
/** **************************************************************** **/
/** ************* DEFAULT GLOBAL VARIABLES VALUES ****************** **/
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 16
#define BLOCK_SIZE (BLOCK_SIZE_X*BLOCK_SIZE_Y)
//#define TEXTURE_MEM_SIZE 5000
#define DIF_FINITAS_DELTA 4
/** Variables físicas **/
#define CANT_TYPES 37
#define MAx 15
#define MIn 0.3
#define DIST (MAx - MIn)
#define DELTA_TIEMPO 0.001
#define TEMP 100
#define TAO 0.1
#define BOX_MAX 999 // distancia máxima del 0 para cada coordenada
// Determinamos un cubo de volumen = (2*BOX_MAX) ^3
/** Filenames **/
/** Filenames **/
// Input: per-type Lennard-Jones coefficients (sigma, epsilon, mass, ...).
char* lennardTableFileName = "Input_Mache/TablaCoeficientesLennard";
// Input: particle list plus simulation parameters (dt, temp0, tempi, tautp, cut).
char* particlesFileName = "Input_Mache/particles.in";
// Outputs: debug dump, main results, trajectory (mdcrd) and timing logs.
char* debugOutputFilename = "Output_Mache/debug.out";
char* outputFilename = "Output_Mache/results.out";
char* crdFilename = "Output_Mache/mdcrd";
char* timeFilename = "Output_Mache/times.out";
using namespace std;
// streamsize ss = cout.precision();
/** **************************************************************** **/
/** ******************** GLOBAL VARIABLES ************************** **/
// 2D texture holding the tabulated LJ potential (or its derivative, when
// `derivative` is set): one row per (type_i, type_j) pair, one column per
// sampled distance r. Bound to a cudaArray before the MD loop.
texture <float, cudaTextureType2D,cudaReadModeElementType> texRef;
double delta_tiempo = DELTA_TIEMPO;  // MD integration timestep (dt)
double temp0 = TEMP;                 // target temperature for velocity rescaling
double tempi;                        // initial temperature (read from input; not used yet)
double tautp = TAO;                  // thermostat coupling constant (lambda correction)
double Boltzmann_cte = 0.0019872041; // Boltzmann constant in kcal/(mol*K)
double box_max_x = BOX_MAX;          // half box length per axis; the box spans
double box_max_y = BOX_MAX;          // [-box_max, +box_max] on each coordinate
double box_max_z = BOX_MAX;
bool box = true;                     // whether box dimensions are read from input
double cut = 12;                     // cutoff radius for interactions
int cant_steps = 1;                  // number of MD iterations
int cant_types = CANT_TYPES;         // number of particle types in the LJ table
int TEXTURE_MEM_SIZE=65000;          // budget (bytes) for the texture-backed table
// Flags set from the command line / configuration:
bool derivative = false;   // tabulate dE/dr instead of E(r) in the texture
bool analytic = false;     // compute LJ analytically instead of via texture lookup
bool results = false;      // verbose debug output (disables timing mode)
bool amberResults = false; // print AMBER-style energy summaries
bool coordinates = false;  // write trajectory coordinates to the crd file
bool periodicity = false;  // apply periodic boundary conditions
/** **************************************************************** **/
/** ************************* DEVICE ******************************* **/
__global__
void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG,
                    double e, double s, double var, int width, int height)
{
    /* Fills one row-block of the tabulated 12-6 Lennard-Jones potential:
     * x indexes the sampled distance r (width = cant_samples_r),
     * y indexes the partner particle type (height = cant_types).
     * (e, s) are epsilon/sigma of the fixed type this table row-group is for;
     * var is the sampling step so that r = MIn + x*var. */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x >= width || y >= height) {return;}
    /* Lorentz-Berthelot mixing rules for the pair (fixed type, type y). */
    double sig12 = (double) (s + SIG[y])/2;
    double eps12 = (double) sqrt(e * EPS[y]);
    double r = (double) MIn+x*var;
    /* Compute (sigma/r)^6 once and square it for the repulsive term,
     * instead of evaluating pow() twice with exponents 12 and 6. */
    double sr6 = pow(sig12/r, 6);
    /* E(r) = 4*eps*[ (sigma/r)^12 - (sigma/r)^6 ] */
    LJ_POT[y*width +x] = (float) 4.0*eps12*( sr6*sr6 - sr6 );
}
/** **************************************************************** **/
__global__
void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG,
                                double e, double s, double var, int width, int height)
{
    /* Tabulates dE/dr of the 12-6 Lennard-Jones potential.
     * col indexes the sampled distance r, row indexes the partner type;
     * (e, s) are the epsilon/sigma of the fixed type for this table,
     * and var is the sampling step (r = MIn + col*var). */
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= width || row >= height) {return;}

    /* Lorentz-Berthelot mixing rules for the pair of types. */
    const double sigma_ij   = (double) (s + SIG[row])/2;
    const double epsilon_ij = (double) sqrt(e * EPS[row]);
    /* Distance sample corresponding to this column. */
    const double r = (double) MIn+col*var;

    /* dE/dr = 24*eps*[ sigma^6/r^7 - 2*sigma^12/r^13 ] */
    dLJ_POT[row*width +col] = (float) 24.0*epsilon_ij*( pow(sigma_ij,6)/ pow(r,7) - 2 * pow(sigma_ij,12)/ pow(r,13));
}
/** **************************************************************** **/
__global__
void close_distances_kernel(double* X, double* Y, double* Z, double* R,
                            double* position_x, double* position_y, double* position_z,
                            double box_x, double box_y, double box_z, int width, int height)
{
    /* Builds the pairwise distance matrices under periodic boundary
     * conditions (minimum-image convention). Thread (i, j) handles the
     * pair of particles i and j; matrices are width x height, row-major. */
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i >= width || j >= height) {return;}

    /* Raw component differences between the two positions. */
    double dx = position_x[i] - position_x[j];
    double dy = position_y[i] - position_y[j];
    double dz = position_z[i] - position_z[j];

    /* Minimum image: wrap each component into [-box/2, +box/2]. */
    dx = dx - box_x * round((double) dx/box_x);
    dy = dy - box_y * round((double) dy/box_y);
    dz = dz - box_z * round((double) dz/box_z);

    const unsigned int pos = j*width+i;
    X[pos] = dx;
    Y[pos] = dy;
    Z[pos] = dz;
    R[pos] = (double) sqrt( dx*dx + dy*dy + dz*dz );
}
/** **************************************************************** **/
/* All-pairs distance matrix without periodicity. Element (col,row)
 * receives the signed component differences (X, Y, Z) and the Euclidean
 * distance R between particles col and row. */
__global__
void distances_kernel(double* R, double* X, double* Y, double* Z,
                      double* x1, double* y1, double* z1, int width, int height)
{
    /* Matrix element handled by this thread */
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= width || row >= height) return;

    const unsigned int idx = row*width + col;
    const double dx = x1[col] - x1[row];
    const double dy = y1[col] - y1[row];
    const double dz = z1[col] - z1[row];
    X[idx] = dx;
    Y[idx] = dy;
    Z[idx] = dz;
    R[idx] = sqrt(dx*dx + dy*dy + dz*dz);
}
/** **************************************************************** **/
/* Finite-difference derivative of the tabulated pair potential.
 * For each particle pair (x,y): converts the distance r into a texture
 * column index, samples E(r) at index +/- DIF_FINITAS_DELTA from the 2-D
 * texture texRef (one row per ordered type pair; hardware linear filtering
 * interpolates between samples), and stores the centred difference in
 * dEr[y*width+x]. Diagonal and beyond-cutoff pairs are set to 0. */
__global__
void derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
    int cant_samples_r, int cant_types, int width, int height)
{
    /* Matrix element computed by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    /* Stay inside the matrix */
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
    /* Potential value for the distance r,
     * for the corresponding particle type */
    /** type of particles **/
    float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
    float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these (+0.5 = texel centre)
    /** Convert r to a column index of the Lennard-Jones table **/
    /** r = (MAX-MIN) * X / N + MIN **/
    /** x = (r-MIN) * N / (MAX-MIN) **/
    float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
    double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 );
    double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 );
    /* Physical distance between the two sampled points */
    double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r;
    dEr[y*width+x] = (E_r_up - E_r_dwn) / (r_dif);
}
/** **************************************************************** **/
/* Direct lookup of the pre-tabulated derivative dE/dr. Used when the
 * texture already holds derivative values (the "-d" mode): converts the
 * pair distance r to a texture column index and fetches the interpolated
 * sample from texRef (one row per ordered type pair). Diagonal and
 * beyond-cutoff pairs are set to 0. */
__global__
void direct_derivative_E_r(double* dEr, double* r, double cut, int* item_to_type,
    int cant_samples_r, int cant_types, int width, int height)
{
    /* Matrix element computed by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    /* Stay inside the matrix */
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;}
    /* Potential value for the distance r,
     * for the corresponding particle type */
    /** type of particles **/
    float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use
    float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these (+0.5 = texel centre)
    /** Convert r to a column index of the Lennard-Jones table **/
    /** r = (MAX-MIN) * X / N + MIN **/
    /** x = (r-MIN) * N / (MAX-MIN) **/
    float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
    dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 );
}
/** **************************************************************** **/
/* Pair potential E(r) via texture lookup. For each particle pair (x,y):
 * selects the texture row for the ordered type pair, converts the distance
 * r to a column index, and fetches the (hardware-interpolated) sample from
 * texRef into Er[y*width+x]. Diagonal and beyond-cutoff pairs get 0. */
__global__
void E_r(double* Er, double* r, double cut, int* item_to_type,
    int cant_samples_r, int cant_types, int width, int height)
{
    /* Matrix element computed by this thread */
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particle 2 **/
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particle 1 **/
    /* Stay inside the matrix */
    if(x >= width || y >= height) {return;}
    if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;}
    /* Potential value for the distance r,
     * for the corresponding particle type */
    /** type of particles **/
    float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use
    float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these
    float row = t_o_p_2 + 0.5 + (t_o_p_1* cant_types); // +0.5 targets the texel centre
    /** Convert r to a column index of the Lennard-Jones table **/
    /** r = (MAX-MIN) * X / N + MIN **/
    /** x = (r-MIN) * N / (MAX-MIN) **/
    float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x
    Er[y*width+x] = (double) tex2D( texRef, index_x, row );
}
/* ***************************************************************** **/
/** +ANALYTIC */
/** **************************************************************** **/
/* Analytic dE/dr for every particle pair, computed straight from the
 * per-type coefficient arrays (no texture lookup). Diagonal and
 * beyond-cutoff pairs are set to 0. Column x = particle 2, row y =
 * particle 1; cant_samples_r is unused here (kept for signature parity). */
__global__
void derivative_E_r_analytic(double* dEr, double* r, double cut, int* item_to_type, int cant_samples_r,
                             double* EPS, double* SIG, int width, int height)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; /* particle 2 */
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; /* particle 1 */
    if(col >= width || row >= height) return;

    const unsigned int idx = row*width + col;
    const double rij = r[idx];
    if(col == row || rij >= cut) { dEr[idx] = 0; return; }

    /* Lorentz-Berthelot mixing of the two particle types' coefficients */
    const int type_i = item_to_type[col];
    const int type_j = item_to_type[row];
    const double sigma_ij   = (SIG[type_i] + SIG[type_j]) / 2;
    const double epsilon_ij = sqrt(EPS[type_i] * EPS[type_j]);
    /* dE/dr = 24*eps*( sig^6/r^7 - 2*sig^12/r^13 ) */
    dEr[idx] = 24.0*epsilon_ij*( pow(sigma_ij,6)/ pow(rij,7) - 2 * pow(sigma_ij,12)/ pow(rij,13));
}
/* Analytic Lennard-Jones energy E(r) for every particle pair, computed
 * straight from the per-type coefficient arrays (no texture lookup).
 * Diagonal and beyond-cutoff pairs are set to 0. Column x = particle 2,
 * row y = particle 1; cant_samples_r is unused (signature parity). */
__global__
void E_r_analytic(double* Er, double* r, double cut, int* item_to_type, int cant_samples_r,
                  double* EPS, double* SIG, int width, int height)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; /* particle 2 */
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; /* particle 1 */
    if(col >= width || row >= height) return;

    const unsigned int idx = row*width + col;
    const double rij = r[idx];
    if(col == row || rij >= cut) { Er[idx] = 0; return; }

    /* Lorentz-Berthelot mixing of the two particle types' coefficients */
    const int type_i = item_to_type[col];
    const int type_j = item_to_type[row];
    const double sigma_ij   = (SIG[type_i] + SIG[type_j]) / 2;
    const double epsilon_ij = sqrt(EPS[type_i] * EPS[type_j]);
    /* E(r) = 4*eps*( (sig/r)^12 - (sig/r)^6 ) */
    Er[idx] = 4.0*epsilon_ij*( pow((sigma_ij/rij),12) - pow((sigma_ij/rij),6));
}
/** **************************************************************** **/
/** -ANALYTIC */
/* ***************************************************************** **/
/** **************************************************************** **/
/* Fx = dE(r) / dr * (x1-x2) / r */
/* Per-pair force component along one axis: F = dE/dr * (x1-x2)/r.
 * dif holds the signed coordinate difference, r the pair distance.
 * Diagonal entries are zeroed (no self-force). */
__global__
void Parcial_Forces_Kernel(double* force, double* dEr, double* dif, double* r, int width, int height)
{
    /* Matrix element handled by this thread */
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= width || row >= height) return;

    const unsigned int idx = row*width + col;
    force[idx] = (col == row) ? 0 : dEr[idx] * dif[idx] / r[idx];
}
/** **************************************************************** **/
/* Reduces one row of the pairwise-force matrix into the net force on a
 * single particle: result[x] = sum over i of forces[x*cant + i].
 * One thread per particle; the row sum itself is serial. */
__global__
void Resultant_Forces_Kernel(double* result, double* forces, int cant)
{
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p >= cant) return;

    double acc = 0;
    const unsigned int base = p * cant;
    for(int k = 0; k < cant; k++)
        acc += forces[base + k];
    result[p] = acc;
}
/** **************************************************************** **/
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
/* Leapfrog velocity update: V(t + Dt/2) = V(t - Dt/2) + F(t)*Dt / m.
 * The 20.455 factor rescales the time step (presumably AMBER's internal
 * time-unit conversion — confirm against the force-field units). */
__global__
void Resultant_Velocities_Kernel(double* velocity, double* old_velocity, double* force, double* m,
                                 int* item_to_type, double delta_tiempo, int cant_particles)
{
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p >= cant_particles) return;

    const double dtx  = delta_tiempo*20.455;
    const double mass = m[item_to_type[p]];
    velocity[p] = old_velocity[p] + ( (force[p]*dtx) / mass );
}
/** **************************************************************** **/
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/* Leapfrog position update: P(t + Dt) = P(t) + V(t + Dt/2)*Dt.
 * Uses the same 20.455 time-unit factor as the velocity kernel. */
__global__
void Resultant_Positions_Kernel(double* positions, double* velocity, double delta_tiempo, int cant)
{
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p >= cant) return;

    const double dtx = delta_tiempo*20.455;
    positions[p] = positions[p] + (velocity[p] * dtx);
}
/** **************************************************************** **/
/* -BOX_MAX 0 BOX_MAX */
/* |-----------------|-----------------| */
/* Wraps coordinates back into the simulation box [-box_max, box_max]:
 *      -BOX_MAX        0        BOX_MAX
 *         |------------|------------|
 * Positions past either face are folded by the box period 2*box_max. */
__global__
void Adjustin_Positions_Kernel(double* position, double box_max, int cant)
{
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p >= cant) return;

    const double shifted = position[p] - box_max;  /* 0 at the +face of the box */
    const double period  = 2*box_max;
    if(shifted > 0)
        position[p] = -box_max + fmod(shifted, period);
    if(shifted < -period)
        position[p] = box_max + fmod(shifted, period);
}
/** **************************************************************** **/
/* Ek = |v|^2 * m / 2 */
/* Ek_x = (v_x)^2 * m / 2 */
/* Per-axis kinetic energy with leapfrog velocity averaging:
 *   v(t) ~ (v(t-Dt/2) + v(t+Dt/2)) / 2
 * so Ek = m*v^2/2 = (vold + v)^2 * m / 8. */
__global__
void Kinetic_Energy_Kernel(double* kE, double* vold, double* v, double* m, int* item_to_type, int cant)
{
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p >= cant) return;

    const double vsum = vold[p] + v[p];
    kE[p] = vsum * vsum * m[item_to_type[p]] / 8;
}
/** **************************************************************** **/
/* Sums the three per-axis kinetic-energy contributions of each particle
 * into a single per-particle value. */
__global__
void Total_Kinetic_Energy_Kernel(double* kE, double* Ke_x, double* Ke_y, double* Ke_z, int cant)
{
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p < cant)
        kE[p] = Ke_x[p] + Ke_y[p] + Ke_z[p];
}
/** **************************************************************** **/
/* Thermostat velocity rescaling: overwrites the stored half-step
 * velocities with the current ones scaled by lambda. */
__global__
void Corrected_Velocities_Kernel(double* vold, double* v, double lambda, int cant){
    /* Particle handled by this thread */
    const unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
    if(p < cant)
        vold[p] = v[p] * lambda;
}
/** **************************************************************** **/
/** *************************** HOST ******************************* **/
int main( int argc, char* argv[] )
{
for(uint i = 0; i < argc; i++){
if(strcmp(argv[i], "-t") == 0){
/* outputTimeFilename */
timeFilename = argv[i+1];
}
if(strcmp(argv[i], "-a") == 0){
/* ANALYTIC mode */
analytic = true;
}
if(strcmp(argv[i], "-d") == 0){
/* DERIVATIVE mode */
derivative = true;
}
if(strcmp(argv[i], "-r") == 0){
/* RESULTS or TIMER mode */
results = true;
amberResults = true;
}
if(strcmp(argv[i], "-ar") == 0){
/* RESULTS */
amberResults = true;
}
if(strcmp(argv[i], "-c") == 0){
/* PRINT mdcrd file */
coordinates = true;
}
if(strcmp(argv[i], "-p") == 0){
/* Periodicity */
periodicity = true;
}
if(strcmp(argv[i], "-tex") == 0){
/* Periodicity */
TEXTURE_MEM_SIZE=atoi(argv[i+1]);
}
}
if (derivative)
cout << "Derivative" << endl;
if (analytic)
cout << "Analytic" << endl;
if(results){
cout << "DEBUG mode ON" << endl;
}
if(amberResults){
cout << "AMBER results ON" << endl;
}
fstream out;
fstream crd;
//if(results or amberResults){
/* Output file */
out.open(outputFilename,fstream::out);
streamsize ss = out.precision();
out << setprecision(20);
//}
if(coordinates){
/* CRD output file */
crd.open(crdFilename,fstream::out);
crd << setprecision(3);
crd.setf( std::ios::fixed, std:: ios::floatfield );
crd << " POS(x) POS(y) POS(z)" << endl;
}
struct timeval tv1, tv2;
fstream taim;
if(!results){ //timer mode ON
/* Time output file */
taim.open(timeFilename, fstream::app | fstream::out);
taim << setprecision(20);
}
/* Levantamos Coeficientes de Lennard */
ifstream table (lennardTableFileName);
table >> cant_types;
/**Variables y memoria*/
size_t cant_types_size = cant_types * sizeof(double);
vector<string> h_type;
h_type.resize(cant_types);
double* h_sigma = (double*) ( malloc(cant_types_size));
double* h_epsilon = (double*) ( malloc(cant_types_size));
double* h_mass = (double*) ( malloc(cant_types_size));
/**Levantamos datos*/
for(int j = 0; j<cant_types ; j++){
table >> h_type[j];
table >> h_sigma[j];
table >> h_epsilon[j];
table >> h_mass[j];
}
table.close();
/* Armamos matrices de lennard */
/**Variables y memoria**/
int cant_samples_r = TEXTURE_MEM_SIZE/(sizeof(float)); // cant of original sample values (máximo permitido por mem de textura)
double var = DIST / ((double) cant_samples_r); // variation of r
size_t cant_samples_r_size = cant_samples_r * sizeof(float);
float* h_dLJPot;
float* h_LJPot;
if(derivative)
h_dLJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
else
h_LJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float)
int width = cant_samples_r;
int height = cant_types;
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid( (int) ceil((double)width / (double)dimBlock.x), (int) ceil((double)height / (double)dimBlock.y) );
double* d_EPS;
double* d_SIG;
float* d_LJPot;
float* d_dLJPot;
cudaMalloc(&d_EPS, cant_types_size);
cudaMalloc(&d_SIG, cant_types_size);
cudaMemcpy(d_EPS, h_epsilon, cant_types_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_SIG, h_sigma, cant_types_size, cudaMemcpyHostToDevice);
if(derivative)
cudaMalloc(&d_dLJPot, cant_samples_r_size * cant_types);
else
cudaMalloc(&d_LJPot, cant_samples_r_size * cant_types);
/** Rellenamos datos con CUDA **/
if(derivative) {
for(int a = 0; a<cant_types; a++){
derivatives_lennard_Kernel<<<dimGrid, dimBlock>>>(d_dLJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
cudaMemcpy( (float*) &(h_dLJPot[(a*cant_samples_r*cant_types)]), d_dLJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost);
}
} else {
for(int a = 0; a<cant_types; a++){
lennard_Kernel<<<dimGrid, dimBlock>>>(d_LJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height);
cudaMemcpy( (float*) &(h_LJPot[(a*cant_samples_r*cant_types)]), d_LJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost);
}
}
/** Liberamos memoria de CUDA **/
cudaFree(&d_EPS);
cudaFree(&d_SIG);
cudaFree(&d_LJPot);
if(results){
/** DEBUG **/
if(derivative)
out << " derivative LENNARD " << endl;
else
out << " LENNARD " << endl;
for(int a = 0; a<cant_types; a++){
out << " Type = " << h_type[a] << endl << " ";
for(int i = 0; i<cant_types; i++){
for(int j = 0; j<cant_samples_r; j+= cant_samples_r/8){
if(derivative)
out << h_dLJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
else
out << h_LJPot[(a*cant_types*cant_samples_r)+(i*cant_samples_r)+j] << ", ";
}
out << endl << " ";
}
out << "***********************************************************************************" << endl;
}
/** DEBUG **/
}
/*Levantamos partículas*/
fstream particles;
particles.open(particlesFileName);
/** Variables y memoria **/
uint cant_particles;
double* h_position_x;
double* h_position_y;
double* h_position_z;
double* h_velocity_x;
double* h_velocity_y;
double* h_velocity_z;
double* h_velocity_old_x;
double* h_velocity_old_y;
double* h_velocity_old_z;
double* h_chargue;
double h_box_x;
double h_box_y;
double h_box_z;
double h_box_alpha;
double h_box_beta;
double h_box_gamma;
vector<string> h_particle_type;
particles >> cant_particles;
size_t cant_particles_size = cant_particles * sizeof(double);
h_position_x = (double*)malloc(cant_particles_size);
h_position_y = (double*)malloc(cant_particles_size);
h_position_z = (double*)malloc(cant_particles_size);
h_velocity_x = (double*)malloc(cant_particles_size);
h_velocity_y = (double*)malloc(cant_particles_size);
h_velocity_z = (double*)malloc(cant_particles_size);
h_velocity_old_x = (double*)malloc(cant_particles_size);
h_velocity_old_y = (double*)malloc(cant_particles_size);
h_velocity_old_z = (double*)malloc(cant_particles_size);
h_chargue = (double*)malloc(cant_particles_size);
h_particle_type.resize(cant_particles);
/** Guardamos datos **/
for(uint i = 0; i < cant_particles ; i++) {
particles >> h_particle_type[i];
particles >> h_position_x[i];
particles >> h_position_y[i];
particles >> h_position_z[i];
particles >> h_velocity_old_x[i];
particles >> h_velocity_old_y[i];
particles >> h_velocity_old_z[i];
particles >> h_chargue[i];
}
/** Perioricidad **/
//TODO: por ahora usamos cubo,
//situamos el cero en el centro del mismo
//Recibimos en orden x, y, z
particles >> box;
if(box){
cout << " Levantamos caja" << endl;
particles >> h_box_x;
particles >> h_box_y;
particles >> h_box_z;
particles >> h_box_alpha;
particles >> h_box_beta;
particles >> h_box_gamma;
if( h_box_alpha != 90 or h_box_beta != 90 or h_box_gamma != 90){
cout << " Se forzaron los angulos para que sea un CUBO: " << endl;
}
box_max_x = h_box_x/2;
box_max_y = h_box_y/2;
box_max_z = h_box_z/2;
}
/** Parametros **/
particles >> cant_steps;
particles >> delta_tiempo;
particles >> temp0;
particles >> tempi;
particles >> tautp;
particles >> cut;
particles.close();
// if(results){
// /** DEBUG **/
// out << " INITIAL VALUES" << endl;
// for(int i = 0; i<cant_particles; i++){
// out << " Type: " << h_particle_type[i] << " | Pos: (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")";
// out << " | Vel: (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
// }
// out << endl;
//
// /** DEBUG **/
// }
if(results){
// /** DEBUG **/
// out << " CANT of TYPES" << endl;
// for(int i = 0; i < h_type.size(); i++){
// out << " " << h_type[i] << " " << cant_of_typ[i] << endl;
// }
// out << endl;
/** DEBUG **/
}
/* Armamos estructura de items para saber de qué tipo
/* es la partícula en la que estamos en CUDA */
/** h_particle_type = H H H H H K K K K K O O O O O O O O O ... **/
/** h_item_particle = 1 1 1 1 1 3 3 3 3 3 9 9 9 9 9 9 9 9 9 ... **/
int * h_item_particle = (int*)malloc(cant_particles * sizeof(int));
int * d_item_particle;
cudaMalloc(&d_item_particle, cant_particles * sizeof(int));
/** Convertimos anotamos type de la partícula como un int que sería el index dentro de h_type **/
for(int i = 0; i< cant_particles; i++){
for(int j = 0; j< h_type.size(); j++){
if(h_type[j] == h_particle_type[i]){
h_item_particle[i] = j;
break;
}
}
}
cudaMemcpy(d_item_particle, h_item_particle, cant_particles * sizeof(int), cudaMemcpyHostToDevice);
// if(results){
// /** DEBUG **/
// out << " ITEM to TYPE" << endl;
// for(int i = 0; i < cant_particles; i++){
// out << " Particle[" << i << "] | Type: " << h_type[h_item_particle[i]] << " (index :" << h_item_particle[i] << ") " << endl;
// }
// out << endl;
// /** DEBUG **/
// }
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL DISPOSITIVO GPU */
/* ************************************************ */
/** Variables **/
size_t s_size = cant_particles_size * cant_particles;
/** Positions **/
double* d_position_x;
double* d_position_y;
double* d_position_z;
cudaMalloc(&d_position_x, cant_particles_size);
cudaMalloc(&d_position_y, cant_particles_size);
cudaMalloc(&d_position_z, cant_particles_size);
cudaMemcpy(d_position_x, h_position_x, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_y, h_position_y, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_z, h_position_z, cant_particles_size, cudaMemcpyHostToDevice);
/** Positions **/
double* d_pos_close_x;
double* d_pos_close_y;
double* d_pos_close_z;
cudaMalloc(&d_pos_close_x, cant_particles_size);
cudaMalloc(&d_pos_close_y, cant_particles_size);
cudaMalloc(&d_pos_close_z, cant_particles_size);
/** Particle's mass **/
double* d_mass;
cudaMalloc(&d_mass, cant_types_size);
cudaMemcpy(d_mass, h_mass, cant_types_size, cudaMemcpyHostToDevice);
/** Velocities **/
double* d_velocity_x;
double* d_velocity_y;
double* d_velocity_z;
double* d_velocity_old_x;
double* d_velocity_old_y;
double* d_velocity_old_z;
cudaMalloc(&d_velocity_x, cant_particles_size);
cudaMalloc(&d_velocity_y, cant_particles_size);
cudaMalloc(&d_velocity_z, cant_particles_size);
cudaMalloc(&d_velocity_old_x, cant_particles_size);
cudaMalloc(&d_velocity_old_y, cant_particles_size);
cudaMalloc(&d_velocity_old_z, cant_particles_size);
cudaMemcpy(d_velocity_old_x, h_velocity_old_x, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_old_y, h_velocity_old_y, cant_particles_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_old_z, h_velocity_old_z, cant_particles_size, cudaMemcpyHostToDevice);
/** Distances **/
double* d_distance_x;
double* d_distance_y;
double* d_distance_z;
double* d_distance_r;
cudaMalloc(&d_distance_x, s_size);
cudaMalloc(&d_distance_y, s_size);
cudaMalloc(&d_distance_z, s_size);
cudaMalloc(&d_distance_r, s_size);
/** Derivatives **/
double* d_dEr;
cudaMalloc(&d_dEr, s_size);
/** VDWAALS **/
double* d_Er;
cudaMalloc(&d_Er, s_size);
/** Forces **/
double* d_Force_x;
double* d_Force_y;
double* d_Force_z;
cudaMalloc(&d_Force_x, s_size);
cudaMalloc(&d_Force_y, s_size);
cudaMalloc(&d_Force_z, s_size);
double* d_Force_x_resultant;
double* d_Force_y_resultant;
double* d_Force_z_resultant;
cudaMalloc(&d_Force_x_resultant, cant_particles_size);
cudaMalloc(&d_Force_y_resultant, cant_particles_size);
cudaMalloc(&d_Force_z_resultant, cant_particles_size);
/** Kinetic Energy **/
double* d_kinetic_energy;
double* d_kinetic_energy_x;
double* d_kinetic_energy_y;
double* d_kinetic_energy_z;
cudaMalloc(&d_kinetic_energy, cant_particles_size);
cudaMalloc(&d_kinetic_energy_x, cant_particles_size);
cudaMalloc(&d_kinetic_energy_y, cant_particles_size);
cudaMalloc(&d_kinetic_energy_z, cant_particles_size);
/* ************************************************ */
/* MANEJO DE MEMORIA EN EL HOST */
/* ************************************************ */
/** Distances **/
double (*h_distance_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_distance_r)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
/** Forces **/
double (*h_Force_x)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_y)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double (*h_Force_z)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
double* h_Force_x_resultant = (double*)malloc(cant_particles_size);
double* h_Force_y_resultant = (double*)malloc(cant_particles_size);
double* h_Force_z_resultant = (double*)malloc(cant_particles_size);
/** Kinetic Energy **/
double* h_kinetic_energy = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_x = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_y = (double*)malloc(cant_particles_size);
double* h_kinetic_energy_z = (double*)malloc(cant_particles_size);
/* ************************************************ */
/* Calculamos ENERGIA CINETICA deseada */
/* ************************************************ */
/* Ek = Kb * T (3N - Nc) / 2 */
double Nc = 5;
double factor_conv_T_Ek = 2 / (Boltzmann_cte * (3 *cant_particles - Nc) );
if(amberResults){
double kinetic_Energy = Boltzmann_cte * temp0 * (3*cant_particles - Nc) / 2;
/** DEBUG **/
out << " THEORETICAL VALUES:" << endl << endl;
out << " * Kb = " << Boltzmann_cte << endl << endl;
out << " * Temperature = " << temp0 << endl << endl;
out << " * Kinetic Energy = " << kinetic_Energy << endl << endl;
out << " * Factor_conv_T_Ek = " << factor_conv_T_Ek << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Seteamos la memoria de TEXTURA */
/* ************************************************ */
cudaArray* cuLennard_i;
// if(!analytic){
/** Usamos texturas **/
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc( 32, 0, 0, 0, cudaChannelFormatKindFloat );
cudaMallocArray(&cuLennard_i, &channelDesc, cant_samples_r, cant_types*cant_types); //width x height
texRef.addressMode[0] = cudaAddressModeClamp;
texRef.filterMode = cudaFilterModeLinear; //cudaFilterModePoint; // //Tipo de interpolación
if(derivative) {
cudaMemcpyToArray(cuLennard_i, 0, 0, h_dLJPot, cant_types * cant_types * cant_samples_r_size, cudaMemcpyHostToDevice);
} else {
cudaMemcpyToArray(cuLennard_i, 0, 0, h_LJPot, cant_types * cant_types * cant_samples_r_size, cudaMemcpyHostToDevice);
}
/** Bindeamos la textura **/
cudaBindTextureToArray(texRef, cuLennard_i, channelDesc);
// }
if(amberResults){
out << endl << " ESTARTIN DE PROGRAM" << endl;
out << " Amaunt of itereishons = " << cant_steps << endl << endl;
}
for(int i=0 ; i<10000 ; i++){
for(int j=0 ; j<1000 ; j++){
}
}
/** Esperamos a que termine de bindear la textura **/
cudaDeviceSynchronize();
if(!results){ //timer mode ON
/** Arrancamos medicion del tiempo **/
gettimeofday(&tv1, NULL);
}
for(int step = 0; step < cant_steps; step++){
/* ********************************************************************************************************** */
/* ****************************************** INICIO Iteracion DM ******************************************* */
/* ********************************************************************************************************** */
if(amberResults){
out << "/* ************************************************************************************************ */" << endl;
out << "/* ************************************* INICIO Iteracion " << step << " ************************************ */" << endl;
out << "/* ************************************************************************************************ */" << endl;
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ************************************************ */
/* Calculamos Matriz de Distancias entre partículas */
/* ************************************************ */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
if(!periodicity){
distances_kernel<<<dimGrid, dimBlock>>>(d_distance_r, d_distance_x, d_distance_y, d_distance_z,
d_position_x, d_position_y, d_position_z, width, height);
} else {
/**Rellenamos datos**/
close_distances_kernel<<<dimGrid, dimBlock>>>(d_distance_x, d_distance_y, d_distance_z, d_distance_r,
d_position_x, d_position_y, d_position_z,
h_box_x, h_box_y, h_box_z, width, height);
}
if(results){
/** DEBUG **/
cudaMemcpy(h_distance_r, d_distance_r, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_distance_x, d_distance_x, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_distance_y, d_distance_y, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_distance_z, d_distance_z, s_size, cudaMemcpyDeviceToHost);
out << " DISTANCES" << endl << " ";
double (*matriz)[cant_particles] = (double (*)[cant_particles]) h_distance_r;
for(int i = 0; i<cant_particles; i+= cant_particles/8){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles/8){
out << matriz[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Derivadas */
/* ************************************************ */
/** Variables y memoria **/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
if(analytic){
derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
E_r_analytic<<<dimGrid, dimBlock>>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
// }
} else {
// /** Calculo de la derivada dE(r)/dr usando diferencias finitas **/
if(derivative){
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
direct_derivative_E_r<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
} else {
// derivative_E_r_analytic<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height);
derivative_E_r<<<dimGrid, dimBlock>>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
}
// if(amberResults){
// /** Calculo la energia E(r) para debug **/
E_r<<<dimGrid, dimBlock>>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height);
}
// }
//
// }
/*
if(!derivative){
out << " Lennard-Jones" << endl << " ";
double vdwaals = 0;
double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
cudaMemcpy(h_Er, d_Er, s_size, cudaMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
out << h_Er[i][j] << "\t";
if(i<=j)
vdwaals += h_Er[i][j];
}
out << endl << " ";
}
//out << endl;
out << " VDWAALS = " << vdwaals << endl << endl;
//taim << TEXTURE_MEM_SIZE << " " << vdwaals << endl;
free(h_Er);
}
*/
// if(amberResults){
//if(!derivative){
/** DEBUG **/
//out << " Lennard-Jones" << endl << " ";
double vdwaals = 0;
double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
cudaMemcpy(h_Er, d_Er, s_size, cudaMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i++){
//out << " " << i << " | ";
for(int j = 0; j<cant_particles; j++){
//out << h_Er[i][j] << "\t";
//if(i<=j)
vdwaals += h_Er[i][j];
}
//out << endl << " ";
}
out << endl;
out << " VDWAALS = " << vdwaals << endl << endl;
free(h_Er);
/** DEBUG **/
//}
//}
if(results){
/** DEBUG **/
out << " DERIVATIVES" << endl << " ";
double (*h_dEr)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size));
cudaMemcpy(h_dEr, d_dEr, s_size, cudaMemcpyDeviceToHost);
for(int i = 0; i<cant_particles; i+= cant_particles/8){
out << " " << i << " | ";
for(int j = 0; j<cant_particles; j+= cant_particles/8){
out << h_dEr[i][j] << "\t";
}
out << endl << " ";
}
out << endl;
free(h_dEr);
/** DEBUG **/
}
if(results){
/** DEBUG **/
cudaMemcpy(h_velocity_old_x, d_velocity_old_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_old_y, d_velocity_old_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_old_z, d_velocity_old_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " OLD VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_old_x[i] << " , " << h_velocity_old_y[i] << " , " << h_velocity_old_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos FUERZAS resultantes */
/* ************************************************ */
/* Fx = dE(r) / dr * (x1-x2) / r *
* Fy = dE(r) / dr * (y1-y2) / r *
* Fz = dE(r) / dr * (z1-z2) / r */
/* Calculo de vectores parciales */
/**Variables y memoria*/
width = cant_particles;
height = cant_particles;
dimGrid.x = ceil((double)width / (double)dimBlock.x);
dimGrid.y = ceil((double)height / (double)dimBlock.y);
/** Calculo del vector F **/
Parcial_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_x, d_dEr, d_distance_x, d_distance_r, width, height);
Parcial_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_y, d_dEr, d_distance_y, d_distance_r, width, height);
Parcial_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_z, d_dEr, d_distance_z, d_distance_r, width, height);
//if(results){
/** DEBUG **/
//**************************************************
//*********IMPRIMO LAS FUERZAS*********************
//*************************************************
/*
cudaMemcpy(h_Force_x, d_Force_x, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_y, d_Force_y, s_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_z, d_Force_z, s_size, cudaMemcpyDeviceToHost);
out << " FORCES" << endl << " ";
for(int i = 0; i<cant_particles; i++){
for(int j = 0; j<cant_particles; j++){
out << "(" << h_Force_x[i][j] << " , " << h_Force_y[i][j] << " , " << h_Force_z[i][j] << ")\t";
if (j==0 && i==1)
taim << TEXTURE_MEM_SIZE << " " << h_Force_x[i][j] << endl;
}
out << endl << " ";
}
out << endl;
//}
*/
/* Calculo del vector F */
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
Resultant_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_x_resultant, d_Force_x, cant_particles);
Resultant_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_y_resultant, d_Force_y, cant_particles);
Resultant_Forces_Kernel<<<dimGrid, dimBlock>>>(d_Force_z_resultant, d_Force_z, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_Force_x_resultant, d_Force_x_resultant, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_y_resultant, d_Force_y_resultant, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Force_z_resultant, d_Force_z_resultant, cant_particles_size, cudaMemcpyDeviceToHost);
out << " RESULTANT FORCES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_Force_x_resultant[i] << " , " << h_Force_y_resultant[i] << " , " << h_Force_z_resultant[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos VELOCIDADES Resultantes */
/* ************************************************ */
/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Piso las velocidades acumuladas al tiempo t con las nuevas de t+Dt */
Resultant_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_x, d_velocity_old_x, d_Force_x_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
Resultant_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_y, d_velocity_old_y, d_Force_y_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
Resultant_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_z, d_velocity_old_z, d_Force_z_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_velocity_x, d_velocity_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_y, d_velocity_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_z, d_velocity_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos POSICIONES Resultantes */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/* (TODO: ajustar condiciones de perioricidad */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
Resultant_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_x, d_velocity_x, delta_tiempo, cant_particles);
Resultant_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_y, d_velocity_y, delta_tiempo, cant_particles);
Resultant_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_z, d_velocity_z, delta_tiempo, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " RESULTANT POSITIONS" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
if(periodicity){
/* ************************************************ */
/* Calculamos POSICIONES con PERIORICIDAD */
/* ************************************************ */
/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
Adjustin_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_x, box_max_x, cant_particles);
Adjustin_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_y, box_max_y, cant_particles);
Adjustin_Positions_Kernel<<<dimGrid, dimBlock>>>(d_position_z, box_max_z, cant_particles);
}
if(coordinates){
/** DEBUG **/
cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost);
if(results){
out << " RESULTANT POSITIONS in the CUBE" << endl;
for(int i = 0; i<cant_particles; i++){
out << i+1 << ": (" << h_particle_type[i] << " (" << h_position_x[i] << " , " << h_position_y[i] << " , " << h_position_z[i] << ")" << endl;
}
out << endl;
}
for(int i = 0; i<cant_particles; i+=2){
crd << " " << h_position_x[i] << " " << h_position_y[i] << " " << h_position_z[i];
if(i+1 < cant_particles){
crd << " " << h_position_x[i+1] << " " << h_position_y[i+1] << " " << h_position_z[i+1] << endl;
} else
crd << endl;
}
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek de cada partícula */
/* ************************************************ */
/* Ek = |vp|^2 * m / 2 con vp = (vold+v)/2 */
/* Ek_x = (v_x)^2 * m / 2 */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Calculamos la energía cinética para las tres coordenadas de cada partícula **/
/** Puede hacerse directamente así, sin calcular módulo por propiedades algebraicas **/
Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy_x, d_velocity_old_x, d_velocity_x, d_mass, d_item_particle, cant_particles);
Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy_y, d_velocity_old_y, d_velocity_y, d_mass, d_item_particle, cant_particles);
Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy_z, d_velocity_old_z, d_velocity_z, d_mass, d_item_particle, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_kinetic_energy_x, d_kinetic_energy_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_kinetic_energy_y, d_kinetic_energy_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_kinetic_energy_z, d_kinetic_energy_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << i+1 << ": (" << h_kinetic_energy_x[i] << " , " << h_kinetic_energy_y[i] << " , " << h_kinetic_energy_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Ek Resultante */
/* ************************************************ */
/* Ek_TOT = sum (Ek_i) */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Calculamos la Energía cinética total de cada partícula **/
Total_Kinetic_Energy_Kernel<<<dimGrid, dimBlock>>>(d_kinetic_energy, d_kinetic_energy_x, d_kinetic_energy_y, d_kinetic_energy_z, cant_particles);
/* */
/** Calculamos la Energía cinética total del sistema **/
cudaMemcpy(h_kinetic_energy, d_kinetic_energy, cant_particles_size, cudaMemcpyDeviceToHost);
double Ek_TOT = 0;
for(int i = 0; i<cant_particles; i++){
Ek_TOT += h_kinetic_energy[i];
}
if(results){
/** DEBUG **/
out << " KINETIC ENERGY" << endl;
for(int i = 0; i<cant_particles; i++){
out << " " << i << " | ";
out << " " << h_kinetic_energy[i] << endl;
}
out << endl;
/** DEBUG **/
}
//if(amberResults){
out << " Total Kinetic Energy(t) = " << Ek_TOT << endl << endl;
// }
/* ************************************************ */
/* Calculamos Temperatura Resultante */
/* ************************************************ */
/* T(t) = 2*Ek_TOT / (Kb*(3N-Nc)) */
double Temp_TOT = Ek_TOT * factor_conv_T_Ek;
//if(amberResults){
/** DEBUG **/
out << " Temp(t) = " << Temp_TOT << endl << endl;
/** DEBUG **/
//}
/* *********************************************** */
/* Calculamos Factor de Correccion */
/* *********************************************** */
/* lambda = sqrt( 1 + 2 * dt / tautp * (T/T(t) -1) ) */
double lambda = sqrt( 1 + delta_tiempo / tautp * (temp0/Temp_TOT -1) );
if(amberResults){
/** DEBUG **/
out << " lambda(t) = " << lambda << endl << endl;
/** DEBUG **/
}
/* ************************************************ */
/* Calculamos Velocidades Corregidas */
/* ************************************************ */
/* vi = lambda * vi */
/**Variables y memoria*/
dimBlock.x = 1024;
dimBlock.y = 1;
dimGrid.x = ceil((double)cant_particles / (double)dimBlock.x);
dimGrid.y = 1;
/** Piso las velocidades acumuladas al tiempo t+Dt con las nuevas de t+Dt corregidas */
//Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_x, d_velocity_x, lambda, cant_particles);
//Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_y, d_velocity_y, lambda, cant_particles);
//Corrected_Velocities_Kernel<<<dimGrid, dimBlock>>>(d_velocity_old_z, d_velocity_z, lambda, cant_particles);
if(results){
/** DEBUG **/
cudaMemcpy(h_velocity_x, d_velocity_old_x, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_y, d_velocity_old_y, cant_particles_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_z, d_velocity_old_z, cant_particles_size, cudaMemcpyDeviceToHost);
out << " CORRECTED RESULTANT VELOCITIES" << endl;
for(int i = 0; i<cant_particles; i++){
out << i << ": (" << h_velocity_x[i] << " , " << h_velocity_y[i] << " , " << h_velocity_z[i] << ")" << endl;
}
out << endl;
/** DEBUG **/
}
dimBlock.x = BLOCK_SIZE_X;
dimBlock.y = BLOCK_SIZE_Y;
/* ********************************************************************************************************** */
/* ******************************************* FIN Iteracion DM ********************************************* */
/* ********************************************************************************************************** */
}
if(!results){ //timer mode ON
gettimeofday(&tv2, NULL);
taim << cant_steps << " " << (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec) << endl;
}
// if(!analytic){
/** Unbindeamos Textura y liberamos memoria **/
cudaUnbindTexture(texRef);
cudaFreeArray(cuLennard_i);
// }
// if(results or amberResults){
out.close();
// }
if(coordinates){
crd.close();
}
/* ************************************************ */
/* Liberamos memoria en Dispositivo */
/* ************************************************ */
cudaFree(&d_item_particle);
/** Positions **/
cudaFree(&d_position_x);
cudaFree(&d_position_y);
cudaFree(&d_position_z);
/** Distances **/
cudaFree(&d_distance_x);
cudaFree(&d_distance_y);
cudaFree(&d_distance_z);
cudaFree(&d_distance_r);
/** Particle's mass **/
cudaFree(d_mass);
/** Velocities **/
cudaFree(d_velocity_x);
cudaFree(d_velocity_y);
cudaFree(d_velocity_z);
/** Derivatives **/
cudaFree(&d_dEr);
/** Forces **/
cudaFree(&d_Force_x);
cudaFree(&d_Force_y);
cudaFree(&d_Force_z);
cudaFree(d_Force_x_resultant);
cudaFree(d_Force_y_resultant);
cudaFree(d_Force_z_resultant);
/** Kinetic Energy **/
cudaFree(d_kinetic_energy);
cudaFree(d_kinetic_energy_x);
cudaFree(d_kinetic_energy_y);
cudaFree(d_kinetic_energy_z);
/* ************************************************ */
/* Liberamos memoria en Host */
/* ************************************************ */
free(h_sigma);
free(h_epsilon);
free(h_mass);
/** Matriz de Lennard Jones **/
if(derivative)
free(h_dLJPot);
else
free(h_LJPot);
free(h_item_particle);
/** Positions **/
free(h_position_x);
free(h_position_y);
free(h_position_z);
/** Distances **/
free(h_distance_x);
free(h_distance_y);
free(h_distance_z);
free(h_distance_r);
/** Velocities **/
free(h_velocity_x);
free(h_velocity_y);
free(h_velocity_z);
/** Chargue **/
free(h_chargue);
/** Forces **/
free(h_Force_x);
free(h_Force_y);
free(h_Force_z);
free(h_Force_x_resultant);
free(h_Force_y_resultant);
free(h_Force_z_resultant);
/** Kinetic Energy **/
free(h_kinetic_energy);
free(h_kinetic_energy_x);
free(h_kinetic_energy_y);
free(h_kinetic_energy_z);
return 0;
}
|
e0676b51fbe8db26bc998ea071713bcbe7501b41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PrefixSum.cuh"
/**
 * @brief Up-Sweep (reduce) phase of a Blelloch scan over a 2048-element block.
 *        Builds partial sums in place; afterwards data_block[2047] holds the
 *        sum of all 2048 entries (the caller reads it as the chunk total).
 *        Must be executed by every thread of the block — it contains
 *        __syncthreads() barriers. Callers pass a __shared__ array.
 */
__device__ void up_sweep_2048(
uint* data_block
) {
// starting_elem is the index of the first element touched at each level
// (1, 3, 7, ... = 2^level - 1); level i combines pairs i/2 apart.
uint starting_elem = 1;
for (uint i=2; i<=2048; i<<=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
const uint element = starting_elem + (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
data_block[element] += data_block[element - (i>>1)];
}
}
starting_elem += i;
// All updates of this level must land before the next level reads them.
__syncthreads();
}
}
/**
 * @brief Down-sweep phase of a Blelloch scan over a 2048-element block.
 *        Expects data_block[2047] to have been zeroed after the up-sweep;
 *        on return data_block holds the exclusive prefix sum of the original
 *        inputs. Must be executed by every thread of the block (contains
 *        __syncthreads() barriers).
 */
__device__ void down_sweep_2048(
uint* data_block
) {
for (uint i=2048; i>=2; i>>=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
// Unsigned arithmetic: for out-of-range threads the subtraction wraps to
// a huge value, which the `< 2048` guard filters out.
const auto element = 2047 - (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
// Blelloch swap-and-add on each pair (elements i/2 apart).
const auto other_element = element - (i>>1);
const auto value = data_block[other_element];
data_block[other_element] = data_block[element];
data_block[element] += value;
}
}
__syncthreads();
}
}
__device__ void prefix_sum_single_block_implementation(
uint* dev_total_sum,
uint* dev_array,
const uint array_size
) {
// Exclusive (Blelloch) prefix sum of dev_array, computed by a single
// thread block in chunks of 2048 elements. Each thread handles two slots,
// so the enclosing kernel must be launched with blockDim.x == 1024 and a
// single block. On return dev_array holds the exclusive scan of its old
// contents and dev_total_sum[0] holds the sum of all elements.
// Algorithm: Blelloch scan, https://www.youtube.com/watch?v=mmYv3Haj6uc
__shared__ uint data_block [2048];
// Running total of every chunk processed so far; added when storing back
// so each chunk's local scan becomes part of a global exclusive scan.
unsigned prev_last_elem = 0;
// Full 2048-element chunks.
for (uint block=0; block<(array_size>>11); ++block) {
const uint first_elem = block << 11;
// Stage the chunk in shared memory.
data_block[2*threadIdx.x] = dev_array[first_elem + 2*threadIdx.x];
data_block[2*threadIdx.x + 1] = dev_array[first_elem + 2*threadIdx.x + 1];
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
// After the up-sweep the last slot holds the chunk total; save it before
// zeroing that slot for the down-sweep.
const uint new_last_elem = data_block[2047];
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back, offset by the sum of all preceding chunks.
dev_array[first_elem + 2*threadIdx.x] = data_block[2*threadIdx.x] + prev_last_elem;
dev_array[first_elem + 2*threadIdx.x + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
prev_last_elem += new_last_elem;
__syncthreads();
}
// Final partial chunk (array_size % 2048 elements), zero-padded so the
// fixed-size sweeps still work.
const auto elements_remaining = array_size & 0x7FF; // % 2048
if (elements_remaining > 0) {
const auto first_elem = array_size - elements_remaining;
// Initialize all elements to zero, then load the valid tail.
data_block[2*threadIdx.x] = 0;
data_block[2*threadIdx.x + 1] = 0;
const auto elem_index = first_elem + 2 * threadIdx.x;
if (elem_index < array_size) {
data_block[2*threadIdx.x] = dev_array[elem_index];
}
if ((elem_index+1) < array_size) {
data_block[2*threadIdx.x + 1] = dev_array[elem_index + 1];
}
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
// Publish the grand total.
if (threadIdx.x==0) {
dev_total_sum[0] = prev_last_elem + data_block[2047];
}
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back elements of the tail chunk.
if (elem_index < array_size) {
dev_array[elem_index] = data_block[2*threadIdx.x] + prev_last_elem;
}
if ((elem_index+1) < array_size) {
dev_array[elem_index + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
}
} else if (threadIdx.x == 0) {
// Bug fix: when array_size is an exact multiple of 2048 the original code
// never wrote dev_total_sum; the total is the accumulated chunk sum.
dev_total_sum[0] = prev_last_elem;
}
}
/**
 * @brief Kernel entry point: exclusive prefix sum of dev_array computed by a
 *        single block; dev_total_sum[0] receives the sum of all elements.
 */
__global__ void prefix_sum_single_block(
  uint* dev_total_sum,
  uint* dev_array,
  const uint array_size
) {
  // Thin wrapper: all the work happens in the device-side implementation.
  prefix_sum_single_block_implementation(dev_total_sum, dev_array, array_size);
}
/**
 * @brief Copies dev_input_array into dev_output_array, then computes the
 *        exclusive prefix sum of the copy in place (single-block kernel).
 */
__global__ void copy_and_prefix_sum_single_block(
  uint* dev_total_sum,
  uint* dev_input_array,
  uint* dev_output_array,
  const uint array_size
) {
  // Stage 1: block-strided copy of the input into the output buffer.
  const uint iterations = (array_size + blockDim.x - 1) / blockDim.x;
  for (uint it = 0; it < iterations; ++it) {
    const uint idx = it * blockDim.x + threadIdx.x;
    if (idx < array_size) {
      dev_output_array[idx] = dev_input_array[idx];
    }
  }
  // Every copy must land before the in-place scan reads the buffer.
  __syncthreads();
  // Stage 2: exclusive prefix sum over the freshly written copy.
  prefix_sum_single_block_implementation(dev_total_sum,
    dev_output_array, array_size);
}
/**
 * @brief Copies Velo track hit numbers into a consecutive container.
 *        One thread block per event (blockIdx.x selects the event);
 *        dev_atomics_storage holds per-event track counts in [0, n_events)
 *        and accumulated track offsets in [n_events, 2*n_events).
 */
__global__ void copy_velo_track_hit_number(
  const VeloTracking::TrackHits* dev_tracks,
  int* dev_atomics_storage,
  uint* dev_velo_track_hit_number
) {
  const uint n_events = gridDim.x;
  const uint event = blockIdx.x;
  const VeloTracking::TrackHits* tracks =
    dev_tracks + event * VeloTracking::max_tracks;
  const int tracks_offset = dev_atomics_storage[n_events + event];
  const int n_tracks = dev_atomics_storage[event];
  // Destination slice of the consecutive output container for this event.
  uint* hit_number_out = dev_velo_track_hit_number + tracks_offset;
  // Block-strided copy of each track's hit count.
  const int iterations = (n_tracks + blockDim.x - 1) / blockDim.x;
  for (int it = 0; it < iterations; ++it) {
    const int idx = it * blockDim.x + threadIdx.x;
    if (idx < n_tracks) {
      hit_number_out[idx] = tracks[idx].hitsNum;
    }
  }
}
| e0676b51fbe8db26bc998ea071713bcbe7501b41.cu | #include "PrefixSum.cuh"
/**
 * @brief Up-Sweep (reduce) phase of a Blelloch scan over a 2048-element block.
 *        Builds partial sums in place; afterwards data_block[2047] holds the
 *        sum of all 2048 entries (the caller reads it as the chunk total).
 *        Must be executed by every thread of the block — it contains
 *        __syncthreads() barriers. Callers pass a __shared__ array.
 */
__device__ void up_sweep_2048(
uint* data_block
) {
// starting_elem is the index of the first element touched at each level
// (1, 3, 7, ... = 2^level - 1); level i combines pairs i/2 apart.
uint starting_elem = 1;
for (uint i=2; i<=2048; i<<=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
const uint element = starting_elem + (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
data_block[element] += data_block[element - (i>>1)];
}
}
starting_elem += i;
// All updates of this level must land before the next level reads them.
__syncthreads();
}
}
/**
 * @brief Down-sweep phase of a Blelloch scan over a 2048-element block.
 *        Expects data_block[2047] to have been zeroed after the up-sweep;
 *        on return data_block holds the exclusive prefix sum of the original
 *        inputs. Must be executed by every thread of the block (contains
 *        __syncthreads() barriers).
 */
__device__ void down_sweep_2048(
uint* data_block
) {
for (uint i=2048; i>=2; i>>=1) {
for (uint j=0; j<(2047 + blockDim.x) / i; ++j) {
// Unsigned arithmetic: for out-of-range threads the subtraction wraps to
// a huge value, which the `< 2048` guard filters out.
const auto element = 2047 - (j*blockDim.x + threadIdx.x) * i;
if (element < 2048) {
// Blelloch swap-and-add on each pair (elements i/2 apart).
const auto other_element = element - (i>>1);
const auto value = data_block[other_element];
data_block[other_element] = data_block[element];
data_block[element] += value;
}
}
__syncthreads();
}
}
__device__ void prefix_sum_single_block_implementation(
uint* dev_total_sum,
uint* dev_array,
const uint array_size
) {
// Exclusive (Blelloch) prefix sum of dev_array, computed by a single
// thread block in chunks of 2048 elements. Each thread handles two slots,
// so the enclosing kernel must be launched with blockDim.x == 1024 and a
// single block. On return dev_array holds the exclusive scan of its old
// contents and dev_total_sum[0] holds the sum of all elements.
// Algorithm: Blelloch scan, https://www.youtube.com/watch?v=mmYv3Haj6uc
__shared__ uint data_block [2048];
// Running total of every chunk processed so far; added when storing back
// so each chunk's local scan becomes part of a global exclusive scan.
unsigned prev_last_elem = 0;
// Full 2048-element chunks.
for (uint block=0; block<(array_size>>11); ++block) {
const uint first_elem = block << 11;
// Stage the chunk in shared memory.
data_block[2*threadIdx.x] = dev_array[first_elem + 2*threadIdx.x];
data_block[2*threadIdx.x + 1] = dev_array[first_elem + 2*threadIdx.x + 1];
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
// After the up-sweep the last slot holds the chunk total; save it before
// zeroing that slot for the down-sweep.
const uint new_last_elem = data_block[2047];
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back, offset by the sum of all preceding chunks.
dev_array[first_elem + 2*threadIdx.x] = data_block[2*threadIdx.x] + prev_last_elem;
dev_array[first_elem + 2*threadIdx.x + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
prev_last_elem += new_last_elem;
__syncthreads();
}
// Final partial chunk (array_size % 2048 elements), zero-padded so the
// fixed-size sweeps still work.
const auto elements_remaining = array_size & 0x7FF; // % 2048
if (elements_remaining > 0) {
const auto first_elem = array_size - elements_remaining;
// Initialize all elements to zero, then load the valid tail.
data_block[2*threadIdx.x] = 0;
data_block[2*threadIdx.x + 1] = 0;
const auto elem_index = first_elem + 2 * threadIdx.x;
if (elem_index < array_size) {
data_block[2*threadIdx.x] = dev_array[elem_index];
}
if ((elem_index+1) < array_size) {
data_block[2*threadIdx.x + 1] = dev_array[elem_index + 1];
}
__syncthreads();
up_sweep_2048((uint*) &data_block[0]);
// Publish the grand total.
if (threadIdx.x==0) {
dev_total_sum[0] = prev_last_elem + data_block[2047];
}
__syncthreads();
data_block[2047] = 0;
__syncthreads();
down_sweep_2048((uint*) &data_block[0]);
// Store back elements of the tail chunk.
if (elem_index < array_size) {
dev_array[elem_index] = data_block[2*threadIdx.x] + prev_last_elem;
}
if ((elem_index+1) < array_size) {
dev_array[elem_index + 1] = data_block[2*threadIdx.x + 1] + prev_last_elem;
}
} else if (threadIdx.x == 0) {
// Bug fix: when array_size is an exact multiple of 2048 the original code
// never wrote dev_total_sum; the total is the accumulated chunk sum.
dev_total_sum[0] = prev_last_elem;
}
}
/**
 * @brief Kernel entry point: exclusive prefix sum of dev_array computed by a
 *        single block; dev_total_sum[0] receives the sum of all elements.
 */
__global__ void prefix_sum_single_block(
  uint* dev_total_sum,
  uint* dev_array,
  const uint array_size
) {
  // Thin wrapper: all the work happens in the device-side implementation.
  prefix_sum_single_block_implementation(dev_total_sum, dev_array, array_size);
}
/**
 * @brief Copies dev_input_array into dev_output_array, then computes the
 *        exclusive prefix sum of the copy in place (single-block kernel).
 */
__global__ void copy_and_prefix_sum_single_block(
  uint* dev_total_sum,
  uint* dev_input_array,
  uint* dev_output_array,
  const uint array_size
) {
  // Stage 1: block-strided copy of the input into the output buffer.
  const uint iterations = (array_size + blockDim.x - 1) / blockDim.x;
  for (uint it = 0; it < iterations; ++it) {
    const uint idx = it * blockDim.x + threadIdx.x;
    if (idx < array_size) {
      dev_output_array[idx] = dev_input_array[idx];
    }
  }
  // Every copy must land before the in-place scan reads the buffer.
  __syncthreads();
  // Stage 2: exclusive prefix sum over the freshly written copy.
  prefix_sum_single_block_implementation(dev_total_sum,
    dev_output_array, array_size);
}
/**
 * @brief Copies Velo track hit numbers into a consecutive container.
 *        One thread block per event (blockIdx.x selects the event);
 *        dev_atomics_storage holds per-event track counts in [0, n_events)
 *        and accumulated track offsets in [n_events, 2*n_events).
 */
__global__ void copy_velo_track_hit_number(
  const VeloTracking::TrackHits* dev_tracks,
  int* dev_atomics_storage,
  uint* dev_velo_track_hit_number
) {
  const uint n_events = gridDim.x;
  const uint event = blockIdx.x;
  const VeloTracking::TrackHits* tracks =
    dev_tracks + event * VeloTracking::max_tracks;
  const int tracks_offset = dev_atomics_storage[n_events + event];
  const int n_tracks = dev_atomics_storage[event];
  // Destination slice of the consecutive output container for this event.
  uint* hit_number_out = dev_velo_track_hit_number + tracks_offset;
  // Block-strided copy of each track's hit count.
  const int iterations = (n_tracks + blockDim.x - 1) / blockDim.x;
  for (int it = 0; it < iterations; ++it) {
    const int idx = it * blockDim.x + threadIdx.x;
    if (idx < n_tracks) {
      hit_number_out[idx] = tracks[idx].hitsNum;
    }
  }
}
|
5f9b69f25bd0806dea389ad31c5c4567e011210a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
int BLOCK_SIZE;
/* Phase 1 of blocked Floyd-Warshall: relax the k-th diagonal (pivot) block,
 * which depends only on itself. Launched with a single BLOCK_SIZE x
 * BLOCK_SIZE thread block and BLOCK_SIZE*BLOCK_SIZE ints of dynamic shared
 * memory (see the launch in main). */
__global__ void pivot_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int dist_block[];
int i = threadIdx.x;
int j = threadIdx.y;
// Global coordinates of this thread's element inside pivot block (k, k).
int x = i + k * BLOCK_SIZE, y = j + k * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
// Relax through intermediate vertex s; the barrier keeps every thread on
// the same s before its results are consumed by the next iteration.
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
/* Phase 2 of blocked Floyd-Warshall: relax the blocks in the pivot COLUMN
 * (column block k), which depend only on themselves and the pivot block.
 * One thread block per row block; launched with gridDim.x = num_blocks - 1
 * and 2*BLOCK_SIZE*BLOCK_SIZE ints of dynamic shared memory
 * (pivot tile + this tile). */
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
// Skip the diagonal block itself (already handled by pivot_floyed).
r += (r >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
// Pivot tile staged transposed: dist_pivot[j][i] = pivot(i, j).
dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
// dist(x, k:j) = min(dist(x, k:j), dist(x, k:s) + pivot(s, j))
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
__syncthreads();
}
dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
}
/* Phase 2 of blocked Floyd-Warshall: relax the blocks in the pivot ROW
 * (row block k), which depend only on themselves and the pivot block.
 * One thread block per column block; launched with gridDim.x =
 * num_blocks - 1 and 2*BLOCK_SIZE*BLOCK_SIZE ints of dynamic shared
 * memory (pivot tile + this tile). */
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int c = blockIdx.x;
// Skip the diagonal block itself (already handled by pivot_floyed).
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int y = j + c * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
// dist(k:i, y) = min(dist(k:i, y), pivot(i, s) + dist(k:s, y))
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
/* Multi-GPU variant of the pivot-column phase: identical relaxation to the
 * pivot_col_floyed overload above, but additionally exports the relaxed
 * tiles into pivot_col_matrix (one BLOCK_SIZE x BLOCK_SIZE slot per row
 * block) so the slave GPU can consume them in res_floyed_slave without
 * re-reading dist_matrix. */
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
// Skip the diagonal block itself (already handled by pivot_floyed).
r += (r >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
// Pivot tile staged transposed: dist_pivot[j][i] = pivot(i, j).
dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
__syncthreads();
}
dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
// Export slot k (the pivot tile — written redundantly by every thread
// block with the same values) and slot r (this tile).
// NOTE(review): dist_pivot is staged transposed, so slot k receives the
// pivot tile transposed; verify the indexing in res_floyed_slave matches.
pivot_col_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
pivot_col_matrix[r * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
/* Multi-GPU variant of the pivot-row phase: identical relaxation to the
 * pivot_row_floyed overload above, but additionally exports the relaxed
 * tiles into pivot_row_matrix (one BLOCK_SIZE x BLOCK_SIZE slot per column
 * block) so the slave GPU can consume them in res_floyed_slave without
 * re-reading dist_matrix. */
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_row_matrix){
extern __shared__ int sdata[];
int* dist_pivot = sdata;
int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
int i = threadIdx.x;
int j = threadIdx.y;
int c = blockIdx.x;
// Skip the diagonal block itself (already handled by pivot_floyed).
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int y = j + c * BLOCK_SIZE;
dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
__syncthreads();
int s;
for (s = 0; s < BLOCK_SIZE; ++s){
dist_block[i * BLOCK_SIZE + j] =
min(dist_block[i * BLOCK_SIZE + j],
dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
__syncthreads();
}
dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
// Export slot k (the pivot tile — written redundantly by every thread
// block with the same values) and slot c (this tile).
pivot_row_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
pivot_row_matrix[c * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
/* Phase 3 of blocked Floyd-Warshall: relax every block outside pivot row
 * and pivot column k, using the already-relaxed pivot-row and pivot-column
 * tiles. Grid covers (num_blocks-1) x (num_blocks-1) blocks; the
 * `+= (>= k)` shifts skip the pivot row/column. Needs
 * 2*BLOCK_SIZE*BLOCK_SIZE ints of dynamic shared memory. No barrier is
 * needed inside the relaxation loop: the staged tiles are read-only there. */
__global__ void res_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x;
int c = blockIdx.y;
r += (r >= k);
c += (c >= k);
int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
// Stage the pivot-column tile (transposed) and the pivot-row tile.
dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
__syncthreads();
int s;
// res starts at the INF sentinel used throughout this file (101 * N).
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
// min over s of dist(x, k:s) + dist(k:s, y)
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
// Keep the better of the old distance and the best path through block k.
dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
/* Phase 3 on the slave GPU: same relaxation as res_floyed, but the
 * pivot-row/column tiles come from the panels prefetched from the master
 * device (pivot_row_matrix / pivot_col_matrix) instead of dist_matrix,
 * and the result OVERWRITES dist_matrix (no min with the stale local
 * value) — the master later merges via checkmin. Blocks sitting on the
 * pivot row (r == k) or pivot column (c == k) simply copy the
 * already-final panel values. row_offset shifts blockIdx.x so the slave
 * processes only its share of row blocks. */
__global__ void res_floyed_slave(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset,
int *pivot_row_matrix, int *pivot_col_matrix){
extern __shared__ int sdata[];
int* dist_pivot_row = sdata;
int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
int i = threadIdx.x;
int j = threadIdx.y;
int r = blockIdx.x + row_offset;
int c = blockIdx.y;
int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
dist_pivot_row[j * (BLOCK_SIZE) + i] = pivot_col_matrix[r * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
dist_pivot_col[i * (BLOCK_SIZE) + j] = pivot_row_matrix[c * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
__syncthreads();
// Early exits are safe: no further barriers follow the one above.
if (r == k){
dist_matrix[x * N + y] = dist_pivot_col[i * (BLOCK_SIZE) + j];
return;
}
else if (c == k){
dist_matrix[x * N + y] = dist_pivot_row[j * (BLOCK_SIZE) + i];
return;
}
int s;
// res starts at the INF sentinel used throughout this file (101 * N).
int res = 101 * N, cur;
for (s = 0; s < BLOCK_SIZE; ++s){
cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
if (cur < res) res = cur;
}
dist_matrix[x * N + y] = res;
}
/* Element-wise merge: keep the smaller of the two distance candidates for
 * each element, starting at row block row_offset. Used by the master GPU
 * to fold the slave's res_floyed_slave output into its own matrix. */
__global__ void checkmin(int *dist_matrix, int *dist_matrix2, int N, int BLOCK_SIZE, int row_offset){
  // Flattened index of the single element this thread owns.
  const int row = threadIdx.x + (blockIdx.x + row_offset) * BLOCK_SIZE;
  const int col = blockIdx.y * BLOCK_SIZE + threadIdx.y;
  const int idx = row * N + col;
  const int candidate = dist_matrix2[idx];
  if (candidate < dist_matrix[idx])
    dist_matrix[idx] = candidate;
}
/* Print the top-left n x n corner of the padded N x N distance matrix,
 * preceded by a "=====" separator. Entries at or above the INF sentinel
 * (N * 101) are printed as INF. */
void output(int n, int N, int* dist_matrix){
  printf("=====\n");
  for (int row = 0; row < n; ++row){
    for (int col = 0; col < n; ++col){
      const int d = dist_matrix[row * N + col];
      // Last column of a row ends the line instead of a space.
      if (d < N * 101)
        printf((col + 1 < n) ? "%d " : "%d\n", d);
      else
        printf((col + 1 < n) ? "INF " : "INF\n");
    }
  }
}
/* Debug dump of a panel of exported pivot tiles: prints each of the
 * N / BLOCK_SIZE tiles as a BLOCK_SIZE x BLOCK_SIZE grid, preceded by a
 * "-----" separator. (The n parameter is currently unused.) */
void output_pivot(int n, int N, int BLOCK_SIZE, int* pivot_matrix){
  printf("-----\n");
  const int num_tiles = N / BLOCK_SIZE;
  for (int tile = 0; tile < num_tiles; ++tile){
    const int* base = pivot_matrix + tile * BLOCK_SIZE * BLOCK_SIZE;
    for (int row = 0; row < BLOCK_SIZE; ++row){
      for (int col = 0; col < BLOCK_SIZE; ++col)
        printf("%d ", base[row * BLOCK_SIZE + col]);
      printf("\n");
    }
  }
}
#define INPUT_BUF_SIZE 1000000000
#define OUTPUT_BUF_SIZE 1000000000
char input_buf[INPUT_BUF_SIZE], output_buf[OUTPUT_BUF_SIZE];
int input_cur_pt, output_cur_pt;
/* Refill the global stdin buffer once the parser's cursor has consumed it
 * all, and rewind the cursor to the start. A short read (end of input) is
 * NUL-terminated so the digit scanners stop cleanly. */
void bufReRead(){
  printf("new read\n");
  const int bytes_read = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
  if (bytes_read < INPUT_BUF_SIZE)
    input_buf[bytes_read] = '\0';
  input_cur_pt = 0;
}
/* Parse the next unsigned decimal integer from the buffered input stream,
 * skipping any non-digit separators and transparently refilling the buffer
 * (via bufReRead) whenever the cursor reaches the end. */
int getIntFromBuf(){
  char c = ' ';
  // Skip everything up to the first digit.
  while (c < '0' || c > '9'){
    c = input_buf[input_cur_pt++];
    if (input_cur_pt == INPUT_BUF_SIZE)
      bufReRead();
  }
  // Accumulate consecutive digits.
  int value = 0;
  while (c >= '0' && c <= '9'){
    value = value * 10 + (c - '0');
    c = input_buf[input_cur_pt++];
    if (input_cur_pt == INPUT_BUF_SIZE)
      bufReRead();
  }
  return value;
}
/* Append the decimal representation of a non-negative integer to the global
 * output buffer. Digits are extracted least-significant first into a
 * scratch array, then emitted most-significant first.
 * NOTE(review): negative inputs are not supported (digit extraction would
 * produce garbage) — callers in this file only pass distances >= 0. */
void putIntToBuf(int x){
  if (x == 0){
    output_buf[output_cur_pt++] = '0';
    return;
  }
  int len = 0;
  // Bug fix: 12 slots instead of 8. Indexing is 1-based (out[0] unused) and
  // a 32-bit int has up to 10 decimal digits, so the old out[8] overflowed
  // for any value >= 10,000,000 (8+ digits). The memset was dropped: only
  // slots written below are ever read.
  int out[12];
  for (; ; ){
    const int t = x / 10;
    out[++len] = x - t * 10;  // current least-significant digit
    x = t;
    if (x == 0) break;
  }
  for (int i = len; i >= 1; --i)
    output_buf[output_cur_pt++] = out[i] + '0';
}
int main(int argc, char** argv){
char *input_filename = argv[1];
char *output_filename = argv[2];
BLOCK_SIZE = atoi(argv[3]);
BLOCK_SIZE = min(BLOCK_SIZE, 32);
BLOCK_SIZE = 16;
/* input & output device */
input_cur_pt = 0;
output_cur_pt = 0;
freopen(input_filename, "r", stdin);
int len = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
if (len < INPUT_BUF_SIZE)
input_buf[len] = '\0';
/*
FOR CUDA
if (BLOCK_SIZE < 32 && BLOCK_SIZE >= 24) BLOCK_SIZE = 24;
if (BLOCK_SIZE < 24 && BLOCK_SIZE >= 16) BLOCK_SIZE = 16;
if (BLOCK_SIZE < 16 && BLOCK_SIZE >= 8) BLOCK_SIZE = 8;
if (BLOCK_SIZE < 8) BLOCK_SIZE = 8;
*/
int i, j;
int n, m;
/*scanf("%d%d", &n, &m);*/
n = getIntFromBuf();
m = getIntFromBuf();
/* Padding */
int num_blocks = n / BLOCK_SIZE;
if (num_blocks * BLOCK_SIZE < n)
num_blocks ++;
int N = num_blocks * BLOCK_SIZE;
int* dist_matrix = (int*)malloc(sizeof(int) * N * N);
/* read in data */
for (i = 0; i < N * N; ++i)
dist_matrix[i] = N * 101;
for (i = 0; i < N; ++i)
dist_matrix[i * N + i] = 0;
for (i = 0; i < m; ++i){
int x, y, w;
/*scanf("%d%d%d", &x, &y, &w);*/
x = getIntFromBuf();
y = getIntFromBuf();
w = getIntFromBuf();
x--;
y--;
if (dist_matrix[x * N + y] > w)
dist_matrix[x * N + y] = w;
}
int* d_dist_matrix;
int* d_pivot_row;
int* d_pivot_col;
int* vd_dist_matrix;
int* vd_pivot_row;
int* vd_pivot_col;
int* d_bak_matrix;
int size = sizeof(int) * N * N;
int pivot_line_size = sizeof(int) * N * BLOCK_SIZE;
int *pivot_row = (int*)malloc(pivot_line_size);
int *pivot_col = (int*)malloc(pivot_line_size);
/* block ASPA */
hipStream_t stream[2];
hipEvent_t fin, fin0;
hipSetDevice(0);
hipStreamCreate(&stream[0]);
hipEventCreate(&fin0);
hipMalloc((void**)&d_dist_matrix, size);
hipMalloc((void**)&d_pivot_row, pivot_line_size);
hipMalloc((void**)&d_pivot_col, pivot_line_size);
hipMalloc((void**)&d_bak_matrix, size);
hipMemcpy(d_dist_matrix, dist_matrix, size, hipMemcpyHostToDevice);
hipSetDevice(1);
hipStreamCreate(&stream[1]);
hipEventCreate(&fin);
hipMalloc((void**)&vd_dist_matrix, size);
hipMalloc((void**)&vd_pivot_row, pivot_line_size);
hipMalloc((void**)&vd_pivot_col, pivot_line_size);
hipDeviceEnablePeerAccess(0, 0);
hipSetDevice(0);
hipDeviceEnablePeerAccess(1, 0);
if (num_blocks > 10)
for (i = 0; i < num_blocks; ++i){
/* phrase #1: self dependent blocks */
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
int master_task = (num_blocks - 1) * 0.70;
int num_row_blocks = master_task + (i <= master_task);
int slave_task = num_blocks - num_row_blocks;
dim3 blockPerGrid_master(master_task, num_blocks - 1);
dim3 blockPerGrid_slave(slave_task, num_blocks);
hipSetDevice(0);
hipLaunchKernelGGL(( pivot_floyed), dim3(1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE), stream[0], d_dist_matrix, N, i, BLOCK_SIZE);
/* phrase #2: pivot row & col blocks */
hipLaunchKernelGGL(( pivot_row_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_row);
hipLaunchKernelGGL(( pivot_col_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_col);
hipMemcpyPeerAsync(vd_pivot_col, 1, d_pivot_col, 0, pivot_line_size, stream[0]);
hipMemcpyPeerAsync(vd_pivot_row, 1, d_pivot_row, 0, pivot_line_size, stream[0]);
hipEventRecord(fin0, stream[0]);
/* phrase #3: other blocks */
hipLaunchKernelGGL(( res_floyed), dim3(blockPerGrid_master), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0], d_dist_matrix, N, i, BLOCK_SIZE);
hipSetDevice(1);
hipStreamWaitEvent(stream[1], fin0, 0);
hipLaunchKernelGGL(( res_floyed_slave), dim3(blockPerGrid_slave), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[1], vd_dist_matrix, N, i, BLOCK_SIZE, num_row_blocks,
vd_pivot_row, vd_pivot_col);
int offset = num_row_blocks * BLOCK_SIZE * N;
hipMemcpyPeerAsync(&d_bak_matrix[offset], 0, &vd_dist_matrix[offset], 1, size - offset * sizeof(int), stream[1]);
hipEventRecord(fin, stream[1]);
hipSetDevice(0);
hipStreamWaitEvent(stream[0], fin, 0);
hipLaunchKernelGGL(( checkmin), dim3(blockPerGrid_slave), dim3(threadsPerBlock), 0, stream[0], d_dist_matrix, d_bak_matrix, N, BLOCK_SIZE, num_row_blocks);
}
else
for (i = 0; i < num_blocks; ++i){
/* phrase #1: self dependent blocks */
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 blockPerGrid(num_blocks - 1, num_blocks - 1);
hipLaunchKernelGGL(( pivot_floyed), dim3(1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE), 0, d_dist_matrix, N, i, BLOCK_SIZE);
if (num_blocks > 1){
/* phrase #2: pivot row & col blocks */
hipLaunchKernelGGL(( pivot_row_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, 0, d_dist_matrix, N, i, BLOCK_SIZE);
hipLaunchKernelGGL(( pivot_col_floyed), dim3(num_blocks - 1), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, 0, d_dist_matrix, N, i, BLOCK_SIZE);
/* phrase #3: other blocks */
hipLaunchKernelGGL(( res_floyed), dim3(blockPerGrid), dim3(threadsPerBlock),
sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, 0, d_dist_matrix, N, i, BLOCK_SIZE);
}
}
hipMemcpy(dist_matrix, d_dist_matrix, size, hipMemcpyDeviceToHost);
freopen(output_filename, "w", stdout);
for (i = 0; i < n; ++i){
for (j = 0; j < n; ++j){
if (dist_matrix[i * N + j] < N * 101)
putIntToBuf(dist_matrix[i * N + j]);
else{
output_buf[output_cur_pt++] = 'I';
output_buf[output_cur_pt++] = 'N';
output_buf[output_cur_pt++] = 'F';
}
output_buf[output_cur_pt++] = ' ';
}
output_buf[output_cur_pt++] = '\n';
}
fwrite(output_buf, 1, output_cur_pt, stdout);
}
| 5f9b69f25bd0806dea389ad31c5c4567e011210a.cu | #include <stdio.h>
#include <stdlib.h>
int BLOCK_SIZE;
/* Blocked Floyd-Warshall, phase 1: fully relax the diagonal pivot block (k,k)
 * in shared memory.
 * Launch: 1 block of BLOCK_SIZE x BLOCK_SIZE threads,
 * dynamic shared memory = BLOCK_SIZE*BLOCK_SIZE*sizeof(int).
 * The BLOCK_SIZE parameter shadows the file-level global of the same name. */
__global__ void pivot_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
    extern __shared__ int dist_block[];
    int i = threadIdx.x;
    int j = threadIdx.y;
    /* global coordinates of this thread's element inside block (k,k) */
    int x = i + k * BLOCK_SIZE, y = j + k * BLOCK_SIZE;
    dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + y];
    __syncthreads();
    int s;
    /* relax through every intermediate vertex s of the pivot block; the
     * barrier after each step orders the read-after-write dependencies */
    for (s = 0; s < BLOCK_SIZE; ++s){
        dist_block[i * BLOCK_SIZE + j] =
            min(dist_block[i * BLOCK_SIZE + j],
                dist_block[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
        __syncthreads();
    }
    dist_matrix[x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
/* Blocked Floyd-Warshall, phase 2 (column variant): relax all blocks (r,k)
 * that share the pivot column, using the already-finished pivot block (k,k).
 * Launch: num_blocks-1 blocks of BLOCK_SIZE x BLOCK_SIZE threads,
 * dynamic shared memory = 2*BLOCK_SIZE*BLOCK_SIZE*sizeof(int)
 * (first half: pivot block, second half: this thread block's tile). */
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
    extern __shared__ int sdata[];
    int* dist_pivot = sdata;
    int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
    int i = threadIdx.x;
    int j = threadIdx.y;
    int r = blockIdx.x;
    /* skip over the pivot row index k: blockIdx.x enumerates all rows != k */
    r += (r >= k);
    int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
    int x = i + r * BLOCK_SIZE;
    dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
    /* pivot block is staged TRANSPOSED ([j][i]) so the inner loop below
     * reads it with the same row stride as dist_block */
    dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y];
    __syncthreads();
    int s;
    for (s = 0; s < BLOCK_SIZE; ++s){
        dist_block[i * BLOCK_SIZE + j] =
            min(dist_block[i * BLOCK_SIZE + j],
                dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
        __syncthreads();
    }
    dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
}
/* Blocked Floyd-Warshall, phase 2 (row variant): relax all blocks (k,c)
 * that share the pivot row, using the already-finished pivot block (k,k).
 * Launch: num_blocks-1 blocks of BLOCK_SIZE x BLOCK_SIZE threads,
 * dynamic shared memory = 2*BLOCK_SIZE*BLOCK_SIZE*sizeof(int). */
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
    extern __shared__ int sdata[];
    int* dist_pivot = sdata;
    int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
    int i = threadIdx.x;
    int j = threadIdx.y;
    int c = blockIdx.x;
    /* skip over the pivot column index k */
    c += (c >= k);
    int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
    int y = j + c * BLOCK_SIZE;
    dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
    dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
    __syncthreads();
    int s;
    for (s = 0; s < BLOCK_SIZE; ++s){
        dist_block[i * BLOCK_SIZE + j] =
            min(dist_block[i * BLOCK_SIZE + j],
                dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
        __syncthreads();
    }
    dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
}
/* Overload of the phase-2 column kernel used on the dual-GPU path: same
 * relaxation as the 4-arg version, but additionally exports the updated
 * pivot-column tiles into pivot_col_matrix (one BLOCK_SIZE^2 slab per block
 * row) so they can be shipped to the peer GPU.
 * NOTE(review): the slab for block index k is written from dist_pivot, which
 * was staged transposed ([j][i]) — so that slab holds the TRANSPOSE of the
 * pivot block; the consumer (res_floyed_slave) appears to rely on this,
 * but confirm against the slave kernel's indexing. */
__global__ void pivot_col_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_col_matrix){
    extern __shared__ int sdata[];
    int* dist_pivot = sdata;
    int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
    int i = threadIdx.x;
    int j = threadIdx.y;
    int r = blockIdx.x;
    r += (r >= k); /* skip the pivot row index */
    int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
    int x = i + r * BLOCK_SIZE;
    dist_block[i * BLOCK_SIZE + j] = dist_matrix[x * N + pivot_y];
    dist_pivot[j * BLOCK_SIZE + i] = dist_matrix[pivot_x * N + pivot_y]; /* transposed staging */
    __syncthreads();
    int s;
    for (s = 0; s < BLOCK_SIZE; ++s){
        dist_block[i * BLOCK_SIZE + j] =
            min(dist_block[i * BLOCK_SIZE + j],
                dist_block[i * BLOCK_SIZE + s] + dist_pivot[j * BLOCK_SIZE + s]);
        __syncthreads();
    }
    dist_matrix[x * N + pivot_y] = dist_block[i * BLOCK_SIZE + j];
    /* export: slab k gets the (transposed) pivot block, slab r this row's tile */
    pivot_col_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
    pivot_col_matrix[r * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
/* Overload of the phase-2 row kernel used on the dual-GPU path: same
 * relaxation as the 4-arg version, plus export of the updated pivot-row
 * tiles into pivot_row_matrix (one BLOCK_SIZE^2 slab per block column)
 * for transfer to the peer GPU. Slab k receives the pivot block itself. */
__global__ void pivot_row_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE, int *pivot_row_matrix){
    extern __shared__ int sdata[];
    int* dist_pivot = sdata;
    int* dist_block = &sdata[BLOCK_SIZE * BLOCK_SIZE];
    int i = threadIdx.x;
    int j = threadIdx.y;
    int c = blockIdx.x;
    c += (c >= k); /* skip the pivot column index */
    int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
    int y = j + c * BLOCK_SIZE;
    dist_block[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + y];
    dist_pivot[i * BLOCK_SIZE + j] = dist_matrix[pivot_x * N + pivot_y];
    __syncthreads();
    int s;
    for (s = 0; s < BLOCK_SIZE; ++s){
        dist_block[i * BLOCK_SIZE + j] =
            min(dist_block[i * BLOCK_SIZE + j],
                dist_pivot[i * BLOCK_SIZE + s] + dist_block[s * BLOCK_SIZE + j]);
        __syncthreads();
    }
    dist_matrix[pivot_x * N + y] = dist_block[i * BLOCK_SIZE + j];
    /* export: slab k = pivot block, slab c = this column's updated tile */
    pivot_row_matrix[k * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_pivot[i * BLOCK_SIZE + j];
    pivot_row_matrix[c * BLOCK_SIZE * BLOCK_SIZE + i * BLOCK_SIZE + j] = dist_block[i * BLOCK_SIZE + j];
}
/* Blocked Floyd-Warshall, phase 3: relax every remaining block (r,c),
 * r != k and c != k, against the pivot-column tile (r,k) and pivot-row
 * tile (k,c) produced in phase 2.
 * Launch: (num_blocks-1) x (num_blocks-1) grid of BLOCK_SIZE^2 threads,
 * dynamic shared memory = 2*BLOCK_SIZE*BLOCK_SIZE*sizeof(int).
 * No barrier is needed in the loop: each thread only reads the two staged
 * tiles, never the value it is accumulating for other threads. */
__global__ void res_floyed(int *dist_matrix, int N, int k, int BLOCK_SIZE){
    extern __shared__ int sdata[];
    int* dist_pivot_row = sdata;
    int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
    int i = threadIdx.x;
    int j = threadIdx.y;
    int r = blockIdx.x;
    int c = blockIdx.y;
    r += (r >= k); /* grid enumerates rows/cols excluding k */
    c += (c >= k);
    int pivot_x = i + k * BLOCK_SIZE, pivot_y = j + k * BLOCK_SIZE;
    int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
    /* both tiles staged transposed/rotated so the loop reads are conflict-light */
    dist_pivot_row[j * (BLOCK_SIZE) + i] = dist_matrix[x * N + pivot_y];
    dist_pivot_col[i * (BLOCK_SIZE) + j] = dist_matrix[pivot_x * N + y];
    __syncthreads();
    int s;
    int res = 101 * N, cur; /* 101*N acts as +infinity (weights <= 100 assumed upstream) */
    for (s = 0; s < BLOCK_SIZE; ++s){
        cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
        if (cur < res) res = cur;
    }
    dist_matrix[x * N + y] = min(dist_matrix[x * N + y], res);
}
/* Phase-3 kernel run on the SECOND GPU over the lower block rows
 * [row_offset, num_blocks). Unlike res_floyed it does not read the local
 * distance matrix for relaxation: it works entirely from the pivot row/col
 * slabs copied over from the master GPU, and OVERWRITES dist_matrix with
 * the through-pivot minimum. The master later folds this back with a
 * min-merge (checkmin), so the previous value is preserved there.
 * Blocks lying on the pivot row/column just copy the staged tile through
 * (note dist_pivot_row was staged transposed, hence the [j][i] read). */
__global__ void res_floyed_slave(int *dist_matrix, int N, int k, int BLOCK_SIZE, int row_offset,
        int *pivot_row_matrix, int *pivot_col_matrix){
    extern __shared__ int sdata[];
    int* dist_pivot_row = sdata;
    int* dist_pivot_col = &sdata[(BLOCK_SIZE) * (BLOCK_SIZE)];
    int i = threadIdx.x;
    int j = threadIdx.y;
    int r = blockIdx.x + row_offset; /* global block row handled by this GPU */
    int c = blockIdx.y;
    int x = i + r * BLOCK_SIZE, y = j + c * BLOCK_SIZE;
    dist_pivot_row[j * (BLOCK_SIZE) + i] = pivot_col_matrix[r * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
    dist_pivot_col[i * (BLOCK_SIZE) + j] = pivot_row_matrix[c * (BLOCK_SIZE * BLOCK_SIZE) + i * BLOCK_SIZE + j];
    __syncthreads();
    if (r == k){
        /* pivot-row block: already final, just write the staged tile */
        dist_matrix[x * N + y] = dist_pivot_col[i * (BLOCK_SIZE) + j];
        return;
    }
    else if (c == k){
        /* pivot-column block: undo the transposed staging on write-back */
        dist_matrix[x * N + y] = dist_pivot_row[j * (BLOCK_SIZE) + i];
        return;
    }
    int s;
    int res = 101 * N, cur; /* 101*N == "infinity" sentinel */
    for (s = 0; s < BLOCK_SIZE; ++s){
        cur = dist_pivot_row[s * (BLOCK_SIZE) + i] + dist_pivot_col[s * (BLOCK_SIZE) + j];
        if (cur < res) res = cur;
    }
    dist_matrix[x * N + y] = res;
}
/* Element-wise min-merge of the slave GPU's result rows (dist_matrix2) back
 * into the master copy (dist_matrix), covering block rows >= row_offset.
 * One thread per matrix element; writes only when the candidate is smaller. */
__global__ void checkmin(int *dist_matrix, int *dist_matrix2, int N, int BLOCK_SIZE, int row_offset){
    int row = (blockIdx.x + row_offset) * BLOCK_SIZE + threadIdx.x;
    int col = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int idx = row * N + col;
    int candidate = dist_matrix2[idx];
    if (candidate < dist_matrix[idx])
        dist_matrix[idx] = candidate;
}
/* Debug printer: dumps the top-left n x n corner of the (padded, N-wide)
 * distance matrix to stdout, preceded by a "=====" banner. Values at or
 * above the N*101 "infinity" sentinel print as INF. */
void output(int n, int N, int* dist_matrix){
    printf("=====\n");
    for (int row = 0; row < n; ++row){
        for (int col = 0; col < n; ++col){
            int d = dist_matrix[row * N + col];
            int last = (col + 1 == n);
            if (d < N * 101)
                printf(last ? "%d\n" : "%d ", d);
            else
                printf(last ? "INF\n" : "INF ");
        }
    }
}
/* Debug printer for the staged pivot row/column slab buffer: prints each of
 * the N/BLOCK_SIZE tiles (BLOCK_SIZE x BLOCK_SIZE each) after a "-----"
 * banner. Parameter n is unused but kept for interface compatibility. */
void output_pivot(int n, int N, int BLOCK_SIZE, int* pivot_matrix){
    printf("-----\n");
    int tiles = N / BLOCK_SIZE;
    for (int t = 0; t < tiles; ++t){
        int *tile = &pivot_matrix[t * BLOCK_SIZE * BLOCK_SIZE];
        for (int r = 0; r < BLOCK_SIZE; ++r){
            for (int c = 0; c < BLOCK_SIZE; ++c)
                printf("%d ", tile[r * BLOCK_SIZE + c]);
            printf("\n");
        }
    }
}
#define INPUT_BUF_SIZE 1000000000
#define OUTPUT_BUF_SIZE 1000000000
char input_buf[INPUT_BUF_SIZE], output_buf[OUTPUT_BUF_SIZE];
int input_cur_pt, output_cur_pt;
/* Refill the global input buffer from stdin and rewind the read cursor.
 * On a short read (EOF), NUL-terminates so the digit scanner stops.
 * The "new read" trace goes to stdout, which at this point is still the
 * console (stdout is only redirected to the output file later in main). */
void bufReRead(){
    printf("new read\n");
    int got = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
    if (got < INPUT_BUF_SIZE)
        input_buf[got] = '\0';
    input_cur_pt = 0;
}
/* Parse the next non-negative decimal integer from the buffered stdin
 * stream (input_buf / input_cur_pt), refilling via bufReRead() whenever
 * the cursor reaches the end of the buffer. Any non-digit bytes before
 * the number are skipped. */
int getIntFromBuf(){
    char c = ' ';
    /* skip everything up to the first digit */
    while (c < '0' || c > '9'){
        c = input_buf[input_cur_pt ++];
        if (input_cur_pt == INPUT_BUF_SIZE)
            bufReRead();
    }
    /* accumulate consecutive digits */
    int value = 0;
    while (c >= '0' && c <= '9'){
        value = value * 10 + (c - '0');
        c = input_buf[input_cur_pt ++];
        if (input_cur_pt == INPUT_BUF_SIZE)
            bufReRead();
    }
    return value;
}
/* Append the decimal representation of a non-negative integer to the global
 * output buffer (output_buf / output_cur_pt). No separator is written here;
 * callers append their own ' ' / '\n'.
 *
 * Fix: the digit scratch array was int out[8] while digits are stored at
 * 1-based indices (out[++len]) — a 32-bit int can have 10 digits, so any
 * x >= 10^7 overflowed the array. Sized to 12 to cover INT_MAX safely.
 * The memset of the scratch array was dead (every slot that is read is
 * written first) and has been removed. */
void putIntToBuf(int x){
    if (x == 0){
        output_buf[output_cur_pt++] = '0';
        return;
    }
    int len = 0;
    int out[12]; /* 1-based fill: indices 1..10 used for INT_MAX */
    for (; ; ){
        int t = x / 10;
        out[++len] = x - t * 10; /* least-significant digit first */
        x = t;
        if (x == 0) break;
    }
    /* emit most-significant digit first */
    for (int i = len; i >= 1; --i)
        output_buf[output_cur_pt++] = out[i] + '0';
}
/* All-pairs shortest path (blocked Floyd-Warshall) driver.
 * Usage: prog <input_file> <output_file> <block_size>
 * Reads "n m" then m directed edges "x y w" (1-based vertices), runs the
 * blocked algorithm on one GPU (small problems) or split across two GPUs
 * with peer-to-peer copies (num_blocks > 10), then writes the n x n
 * distance matrix ("INF" for unreachable pairs) to the output file.
 * NOTE(review): no CUDA error checking anywhere; failures would surface
 * only as wrong output. Device/host allocations are never freed (process
 * exit reclaims them). */
int main(int argc, char** argv){
    char *input_filename = argv[1];
    char *output_filename = argv[2];
    BLOCK_SIZE = atoi(argv[3]);
    BLOCK_SIZE = min(BLOCK_SIZE, 32);
    /* NOTE(review): this line makes the argv[3]/min() handling above dead
     * code — BLOCK_SIZE is forced to 16 regardless of the CLI argument. */
    BLOCK_SIZE = 16;
    /* input & output device */
    input_cur_pt = 0;
    output_cur_pt = 0;
    freopen(input_filename, "r", stdin);
    /* slurp (up to) the whole input file into the global buffer */
    int len = fread(input_buf, 1, INPUT_BUF_SIZE, stdin);
    if (len < INPUT_BUF_SIZE)
        input_buf[len] = '\0';
    /*
    FOR CUDA
    if (BLOCK_SIZE < 32 && BLOCK_SIZE >= 24) BLOCK_SIZE = 24;
    if (BLOCK_SIZE < 24 && BLOCK_SIZE >= 16) BLOCK_SIZE = 16;
    if (BLOCK_SIZE < 16 && BLOCK_SIZE >= 8) BLOCK_SIZE = 8;
    if (BLOCK_SIZE < 8) BLOCK_SIZE = 8;
    */
    int i, j;
    int n, m;
    /*scanf("%d%d", &n, &m);*/
    n = getIntFromBuf();
    m = getIntFromBuf();
    /* Padding: round n up to a whole number of BLOCK_SIZE-wide blocks */
    int num_blocks = n / BLOCK_SIZE;
    if (num_blocks * BLOCK_SIZE < n)
        num_blocks ++;
    int N = num_blocks * BLOCK_SIZE;
    int* dist_matrix = (int*)malloc(sizeof(int) * N * N);
    /* read in data: N*101 is the "infinity" sentinel (edge weights assumed
     * <= 100, so any real path is shorter), diagonal is 0 */
    for (i = 0; i < N * N; ++i)
        dist_matrix[i] = N * 101;
    for (i = 0; i < N; ++i)
        dist_matrix[i * N + i] = 0;
    for (i = 0; i < m; ++i){
        int x, y, w;
        /*scanf("%d%d%d", &x, &y, &w);*/
        x = getIntFromBuf();
        y = getIntFromBuf();
        w = getIntFromBuf();
        x--;
        y--;
        /* keep the lightest edge between duplicate (x,y) pairs */
        if (dist_matrix[x * N + y] > w)
            dist_matrix[x * N + y] = w;
    }
    int* d_dist_matrix;   /* device 0: working matrix */
    int* d_pivot_row;     /* device 0: exported pivot-row slabs */
    int* d_pivot_col;     /* device 0: exported pivot-col slabs */
    int* vd_dist_matrix;  /* device 1: slave scratch matrix */
    int* vd_pivot_row;    /* device 1: peer copy of pivot-row slabs */
    int* vd_pivot_col;    /* device 1: peer copy of pivot-col slabs */
    int* d_bak_matrix;    /* device 0: staging area for the slave's rows */
    int size = sizeof(int) * N * N;
    int pivot_line_size = sizeof(int) * N * BLOCK_SIZE;
    /* NOTE(review): these two host buffers are allocated but never used
     * (and never freed) — likely leftovers from a debug path. */
    int *pivot_row = (int*)malloc(pivot_line_size);
    int *pivot_col = (int*)malloc(pivot_line_size);
    /* block ASPA: set up both devices, streams, events and peer access */
    cudaStream_t stream[2];
    cudaEvent_t fin, fin0;
    cudaSetDevice(0);
    cudaStreamCreate(&stream[0]);
    cudaEventCreate(&fin0);
    cudaMalloc((void**)&d_dist_matrix, size);
    cudaMalloc((void**)&d_pivot_row, pivot_line_size);
    cudaMalloc((void**)&d_pivot_col, pivot_line_size);
    cudaMalloc((void**)&d_bak_matrix, size);
    cudaMemcpy(d_dist_matrix, dist_matrix, size, cudaMemcpyHostToDevice);
    cudaSetDevice(1);
    cudaStreamCreate(&stream[1]);
    cudaEventCreate(&fin);
    cudaMalloc((void**)&vd_dist_matrix, size);
    cudaMalloc((void**)&vd_pivot_row, pivot_line_size);
    cudaMalloc((void**)&vd_pivot_col, pivot_line_size);
    cudaDeviceEnablePeerAccess(0, 0);
    cudaSetDevice(0);
    cudaDeviceEnablePeerAccess(1, 0);
    if (num_blocks > 10)
        /* dual-GPU path: device 0 does phases 1+2 and ~70% of phase 3;
         * device 1 handles the remaining block rows from the peer-copied
         * pivot slabs, and its rows are min-merged back via checkmin. */
        for (i = 0; i < num_blocks; ++i){
            /* phrase #1: self dependent blocks */
            dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
            int master_task = (num_blocks - 1) * 0.70;
            /* master takes one extra row while the pivot row is in its range */
            int num_row_blocks = master_task + (i <= master_task);
            int slave_task = num_blocks - num_row_blocks;
            dim3 blockPerGrid_master(master_task, num_blocks - 1);
            dim3 blockPerGrid_slave(slave_task, num_blocks);
            cudaSetDevice(0);
            pivot_floyed<<<1, threadsPerBlock,
                sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE), stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE);
            /* phrase #2: pivot row & col blocks */
            pivot_row_floyed<<<num_blocks - 1, threadsPerBlock,
                sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_row);
            pivot_col_floyed<<<num_blocks - 1, threadsPerBlock,
                sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE, d_pivot_col);
            /* ship pivot slabs to device 1; fin0 marks their availability */
            cudaMemcpyPeerAsync(vd_pivot_col, 1, d_pivot_col, 0, pivot_line_size, stream[0]);
            cudaMemcpyPeerAsync(vd_pivot_row, 1, d_pivot_row, 0, pivot_line_size, stream[0]);
            cudaEventRecord(fin0, stream[0]);
            /* phrase #3: other blocks */
            res_floyed<<<blockPerGrid_master, threadsPerBlock,
                sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[0]>>>(d_dist_matrix, N, i, BLOCK_SIZE);
            cudaSetDevice(1);
            cudaStreamWaitEvent(stream[1], fin0, 0);
            res_floyed_slave<<<blockPerGrid_slave, threadsPerBlock,
                sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2, stream[1]>>>(vd_dist_matrix, N, i, BLOCK_SIZE, num_row_blocks,
                    vd_pivot_row, vd_pivot_col);
            /* copy the slave's rows back and min-merge them on device 0 */
            int offset = num_row_blocks * BLOCK_SIZE * N;
            cudaMemcpyPeerAsync(&d_bak_matrix[offset], 0, &vd_dist_matrix[offset], 1, size - offset * sizeof(int), stream[1]);
            cudaEventRecord(fin, stream[1]);
            cudaSetDevice(0);
            cudaStreamWaitEvent(stream[0], fin, 0);
            checkmin<<<blockPerGrid_slave, threadsPerBlock, 0, stream[0]>>>(d_dist_matrix, d_bak_matrix, N, BLOCK_SIZE, num_row_blocks);
        }
    else
        /* single-GPU path for small matrices (default stream, device 0) */
        for (i = 0; i < num_blocks; ++i){
            /* phrase #1: self dependent blocks */
            dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
            dim3 blockPerGrid(num_blocks - 1, num_blocks - 1);
            pivot_floyed<<<1, threadsPerBlock,
                sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE)>>>(d_dist_matrix, N, i, BLOCK_SIZE);
            if (num_blocks > 1){
                /* phrase #2: pivot row & col blocks */
                pivot_row_floyed<<<num_blocks - 1, threadsPerBlock,
                    sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2>>>(d_dist_matrix, N, i, BLOCK_SIZE);
                pivot_col_floyed<<<num_blocks - 1, threadsPerBlock,
                    sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2>>>(d_dist_matrix, N, i, BLOCK_SIZE);
                /* phrase #3: other blocks */
                res_floyed<<<blockPerGrid, threadsPerBlock,
                    sizeof(int) * (BLOCK_SIZE) * (BLOCK_SIZE) * 2>>>(d_dist_matrix, N, i, BLOCK_SIZE);
            }
        }
    /* implicit sync: blocking memcpy waits for all queued work */
    cudaMemcpy(dist_matrix, d_dist_matrix, size, cudaMemcpyDeviceToHost);
    freopen(output_filename, "w", stdout);
    /* format the n x n result into the output buffer, then flush once */
    for (i = 0; i < n; ++i){
        for (j = 0; j < n; ++j){
            if (dist_matrix[i * N + j] < N * 101)
                putIntToBuf(dist_matrix[i * N + j]);
            else{
                output_buf[output_cur_pt++] = 'I';
                output_buf[output_cur_pt++] = 'N';
                output_buf[output_cur_pt++] = 'F';
            }
            output_buf[output_cur_pt++] = ' ';
        }
        output_buf[output_cur_pt++] = '\n';
    }
    fwrite(output_buf, 1, output_cur_pt, stdout);
}
|
ac8bd2fbe447892d88d4690ae18c69ea32ac4f8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
// accelerated reduction for one vector
/* Second-stage dot-product reduction: folds the Gs per-block partials in
 * vtmp into one partial per launch block, written to vtmp2.
 * Assumes blockDim.x == 128 (matches the hard-coded blockSize) and
 * blockDim.x * sizeof(magmaDoubleComplex) bytes of dynamic shared memory. */
__global__ void
magma_zcgreduce_kernel_spmv1(
    int Gs,
    int n,
    magmaDoubleComplex_ptr vtmp,
    magmaDoubleComplex_ptr vtmp2 )
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
    int i = blockIdx.x * ( blockSize * 2 ) + Idx;
    /* grid-stride accumulation, two elements per thread per trip, so the
     * shared-memory tree below starts from 128 live values */
    while (i < Gs ) {
        temp[ Idx ] += vtmp[ i ];
        temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
                                              : MAGMA_Z_MAKE( 0.0, 0.0);
        i += gridSize;
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    /* final-warp tree; real precisions use the volatile-pointer idiom that
     * relies on implicit warp synchrony (legacy pre-Volta MAGMA style) */
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}
// computes the SpMV using CSR and the first step of the reduction
/* Fused CSR SpMV + first reduction stage for merged CG:
 * computes z = A*d (one thread per row) and the per-block partials of the
 * dot product d.z into vtmp.
 * Assumes blockDim.x == 256 (reduction starts at Idx < 128) and
 * blockDim.x * sizeof(magmaDoubleComplex) dynamic shared memory. */
__global__ void
magma_zcgmerge_spmvcsr_kernel(
    int n,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp )
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); /* tail threads contribute zero */
    if( i<n ){
        magmaDoubleComplex dot = MAGMA_Z_ZERO;
        int start = drowptr[ i ];
        int end = drowptr[ i+1 ];
        for( j=start; j<end; j++)
            dot += dval[ j ] * d[ dcolind[j] ];
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot; /* local term of d.z */
    }
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    /* final-warp tree, precision-specific (see PRECISION_* blocks) */
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// computes the SpMV using ELL and the first step of the reduction
/* Fused ELL SpMV + first reduction stage: z = A*d with column-major ELL
 * storage (element k of row i at slot n*k + i), plus per-block partials
 * of d.z into vtmp. Same launch assumptions as the CSR variant
 * (blockDim.x == 256, blockDim.x complex values of shared memory). */
__global__ void
magma_zcgmerge_spmvell_kernel(
    int n,
    int num_cols_per_row,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp )
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
    if(i < n ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        for ( int k = 0; k < num_cols_per_row ; k ++){
            int col = dcolind [ n * k + i ];
            magmaDoubleComplex val = dval [ n * k + i ];
            if( val != 0) /* zero entries are ELL padding */
                dot += val * d[ col ];
        }
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    /* final-warp tree, precision-specific (see PRECISION_* blocks) */
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// computes the SpMV using ELLPACK and the first step of the reduction
/* Fused ELLPACK SpMV + first reduction stage: identical to the ELL variant
 * except for row-major ELLPACK storage (element k of row i at slot
 * num_cols_per_row*i + k). */
__global__ void
magma_zcgmerge_spmvellpack_kernel(
    int n,
    int num_cols_per_row,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp )
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
    if(i < n ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        for ( int k = 0; k < num_cols_per_row ; k ++){
            int col = dcolind [ num_cols_per_row * i + k ];
            magmaDoubleComplex val = dval [ num_cols_per_row * i + k ];
            if( val != 0) /* zero entries are padding */
                dot += val * d[ col ];
        }
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    /* final-warp tree, precision-specific (see PRECISION_* blocks) */
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// computes the SpMV using ELLRT 8 threads per row
/* ELLRT SpMV with T (=8) cooperating threads per row: each of the T lanes
 * accumulates a strided slice of the row, partials are combined through a
 * small shared-memory fan-in, and lane 0 writes z[i]. The dot-product
 * partials (vtmp) are NOT produced here — a follow-up kernel
 * (..._ellpackrt_kernel2) computes them from z; vtmp is unused.
 * NOTE(review): the fan-in has no barriers/volatile — it relies on the T
 * lanes of a row living in the same warp (T <= 32 divides the warp);
 * confirm before running under independent thread scheduling. */
__global__ void
magma_zcgmerge_spmvellpackrt_kernel_8(
    int n,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowlength,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp,
    magma_int_t T,
    magma_int_t alignment )
{
    int idx = blockIdx.y * gridDim.x * blockDim.x +
              blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
    int idb = threadIdx.x ; // local thread index
    int idp = idb%T; // lane within the row's thread group
    int i = idx/T; // row index
    extern __shared__ magmaDoubleComplex shared[];
    if(i < n ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        int max_ = (drowlength[i]+T-1)/T;
        // number of elements each thread handles
        for ( int k = 0; k < max_ ; k++ ){
            // original code in paper (not working for me)
            //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
            //int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
            // new code (working for me)
            magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
            int col = dcolind [ k*(T)+(i*alignment)+idp ];
            dot += val * d[ col ];
        }
        shared[idb] = dot;
        // fan-in over the T=8 lane partials
        if( idp < 4 ){
            shared[idb]+=shared[idb+4];
            if( idp < 2 ) shared[idb]+=shared[idb+2];
            if( idp == 0 ) {
                z[i] = (shared[idb]+shared[idb+1]);
            }
        }
    }
}
// computes the SpMV using ELLRT 8 threads per row
/* ELLRT SpMV, T = 16 threads per row. Identical to the T=8 variant except
 * the shared-memory fan-in starts one level higher (stride 8). The vtmp
 * argument is unused here; the d.z partials come from the follow-up
 * ..._ellpackrt_kernel2. */
__global__ void
magma_zcgmerge_spmvellpackrt_kernel_16(
    int n,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowlength,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp,
    magma_int_t T,
    magma_int_t alignment )
{
    int idx = blockIdx.y * gridDim.x * blockDim.x +
              blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
    int idb = threadIdx.x ; // local thread index
    int idp = idb%T; // lane within the row's thread group
    int i = idx/T; // row index
    extern __shared__ magmaDoubleComplex shared[];
    if(i < n ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        int max_ = (drowlength[i]+T-1)/T;
        // number of elements each thread handles
        for ( int k = 0; k < max_ ; k++ ){
            // original code in paper (not working for me)
            //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
            //int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
            // new code (working for me)
            magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
            int col = dcolind [ k*(T)+(i*alignment)+idp ];
            dot += val * d[ col ];
        }
        shared[idb] = dot;
        // fan-in over the T=16 lane partials
        if( idp < 8 ){
            shared[idb]+=shared[idb+8];
            if( idp < 4 ) shared[idb]+=shared[idb+4];
            if( idp < 2 ) shared[idb]+=shared[idb+2];
            if( idp == 0 ) {
                z[i] = (shared[idb]+shared[idb+1]);
            }
        }
    }
}
// computes the SpMV using ELLRT 8 threads per row
/* ELLRT SpMV, T = 32 threads per row (one full warp). Identical to the
 * T=8/16 variants except the fan-in starts at stride 16. vtmp unused;
 * d.z partials computed by ..._ellpackrt_kernel2. */
__global__ void
magma_zcgmerge_spmvellpackrt_kernel_32(
    int n,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowlength,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp,
    magma_int_t T,
    magma_int_t alignment )
{
    int idx = blockIdx.y * gridDim.x * blockDim.x +
              blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
    int idb = threadIdx.x ; // local thread index
    int idp = idb%T; // lane within the row's thread group
    int i = idx/T; // row index
    extern __shared__ magmaDoubleComplex shared[];
    if(i < n ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        int max_ = (drowlength[i]+T-1)/T;
        // number of elements each thread handles
        for ( int k = 0; k < max_ ; k++ ){
            // original code in paper (not working for me)
            //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
            //int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
            // new code (working for me)
            magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
            int col = dcolind [ k*(T)+(i*alignment)+idp ];
            dot += val * d[ col ];
        }
        shared[idb] = dot;
        // fan-in over the T=32 lane partials
        if( idp < 16 ){
            shared[idb]+=shared[idb+16];
            if( idp < 8 ) shared[idb]+=shared[idb+8];
            if( idp < 4 ) shared[idb]+=shared[idb+4];
            if( idp < 2 ) shared[idb]+=shared[idb+2];
            if( idp == 0 ) {
                z[i] = (shared[idb]+shared[idb+1]);
            }
        }
    }
}
// additional kernel necessary to compute first reduction step
/* Companion to the ELLRT SpMV kernels: computes the per-block partial sums
 * of the dot product d.z (the SpMV kernels above only produced z) into
 * vtmp2. Assumes blockDim.x == 256 and blockDim.x complex values of
 * dynamic shared memory. */
__global__ void
magma_zcgmerge_spmvellpackrt_kernel2(
    int n,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr vtmp2 )
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_Z_MAKE(0.0, 0.0);
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    /* final-warp tree, precision-specific (see PRECISION_* blocks) */
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}
// computes the SpMV using SELLC
/* Fused SELLC SpMV + first reduction stage: each thread handles one row of
 * its SELLC slice (slice = one thread block of `blocksize` rows; drowptr
 * gives the slice offsets, entries stored column-by-column within a slice),
 * writing z = A*d and per-block partials of d.z into vtmp.
 * NOTE(review): the reduction tree starts at Idx < 128, which presumes
 * blockDim.x (== blocksize) is 256 — confirm against the host launcher. */
__global__ void
magma_zcgmerge_spmvsellc_kernel(
    int num_rows,
    int blocksize,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowptr,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr vtmp)
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int offset = drowptr[ blockIdx.x ];
    /* number of packed columns in this slice */
    int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
    temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
    if(i < num_rows ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        for ( int n = 0; n < border; n ++){
            int col = dcolind [offset+ blocksize * n + Idx ];
            magmaDoubleComplex val = dval[offset+ blocksize * n + Idx];
            if( val != 0){ /* zero entries are slice padding */
                dot=dot+val*d[col];
            }
        }
        z[ i ] = dot;
        temp[ Idx ] = d[ i ] * dot;
    }
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    /* final-warp tree, precision-specific (see PRECISION_* blocks) */
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
/* SELLP SpMV (2D thread blocks): T (=8) threads cooperate on each row of a
 * `blocksize`-row slice. Each thread accumulates every T-th packed column
 * of its row, partials meet in shared memory, and lane 0 writes z[row].
 * Dynamic shared memory: blocksize * T complex values. Only z is produced;
 * the d.z reduction happens in a separate kernel.
 * (SELLP format: Kreutzer/Hager/Wellein/Fehske/Bishop, "A unified sparse
 * matrix data format for modern processors with wide SIMD units".) */
__global__ void
magma_zcgmerge_spmvsellpt_kernel_8(
    int num_rows,
    int blocksize,
    int T,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowptr,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z)
{
    // T threads assigned to each row
    int idx = threadIdx.y ; // thread in row
    int idy = threadIdx.x; // local row
    int ldx = idx * blocksize + idy; // this thread's shared-memory slot
    int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
    int row = bdx * blocksize + idy; // global row index
    extern __shared__ magmaDoubleComplex shared[];
    if(row < num_rows ){
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T; // total number of threads
        int max_ = (drowptr[ bdx+1 ]-offset)/block;
        // number of elements each thread handles
        for ( int k = 0; k < max_ ; k++ ){
            magmaDoubleComplex val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * d[ col ];
        }
        shared[ldx] = dot;
        __syncthreads();
        // fan-in across the T=8 per-row partials (stride = blocksize)
        if( idx < 4 ){
            shared[ldx]+=shared[ldx+blocksize*4];
            __syncthreads();
            if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
            __syncthreads();
            if( idx == 0 ) {
                z[row] =
                    (shared[ldx]+shared[ldx+blocksize*1]);
            }
        }
    }
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 16: identical structure to the T = 8 variant,
// with one extra reduction step. Requires blocksize * T shared-memory
// elements of magmaDoubleComplex.
__global__ void
magma_zcgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ]; // start of this block's slice in dval/dcolind
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
// tree reduction over the T = 16 partial sums of each row.
// NOTE(review): barriers inside divergent branches — historical MAGMA
// idiom; confirm behavior on post-Volta scheduling.
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 32: identical structure to the T = 8/16
// variants, with two extra reduction steps. Requires blocksize * T
// shared-memory elements of magmaDoubleComplex.
__global__ void
magma_zcgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ]; // start of this block's slice in dval/dcolind
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
// tree reduction over the T = 32 partial sums of each row.
// NOTE(review): barriers inside divergent branches — historical MAGMA
// idiom; confirm behavior on post-Volta scheduling.
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
// Single-thread scalar update on the CG parameter array skp:
//   skp[3] = skp[1] / skp[4]   (rho   = beta / tmp)
//   skp[2] = skp[1]            (gamma = beta)
__global__ void // rho = beta/tmp; gamma = beta;
magma_zcg_rhokernel(
magmaDoubleComplex_ptr skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid != 0 )
        return;                       // only thread 0 performs the update
    const magmaDoubleComplex beta = skp[ 1 ];
    skp[ 3 ] = beta / skp[ 4 ];       // rho
    skp[ 2 ] = beta;                  // gamma
}
/**
Purpose
-------
Merges the first SpmV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_z_sparse_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dd magmaDoubleComplex_ptr
input vector d
@param[out]
dz magmaDoubleComplex_ptr
output vector z
@param[out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zcgmerge_spmv1(
magma_z_sparse_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dd,
magmaDoubleComplex_ptr dz,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
// one thread per row; Ms bytes of dynamic shared memory per block
// (one magmaDoubleComplex per thread) for the in-kernel dot product
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
// d1/d2 are ping-pong buffers for the multi-pass reduction below
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
// dispatch the fused SpMV (+ first dot-product reduction) on storage format
if ( A.storage_type == Magma_CSR )
hipLaunchKernelGGL(( magma_zcgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
hipLaunchKernelGGL(( magma_zcgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_SELLP ) {
// 2D blocks: blocksize rows x alignment threads per row
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
// spread the row blocks over a near-square 2D grid
int dimgrid1 = sqrt(A.numblocks);
int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1;
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( magmaDoubleComplex );
if ( A.alignment == 8)
hipLaunchKernelGGL(( magma_zcgmerge_spmvsellpt_kernel_8)
, dim3(gridsellp), dim3(block), Mssellp, queue ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
hipLaunchKernelGGL(( magma_zcgmerge_spmvsellpt_kernel_16)
, dim3(gridsellp), dim3(block), Mssellp, queue ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
hipLaunchKernelGGL(( magma_zcgmerge_spmvsellpt_kernel_32)
, dim3(gridsellp), dim3(block), Mssellp, queue ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize);
int num_threads = A.alignment*A.blocksize;
// row length rounded up to a multiple of the alignment
int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment)
*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = sqrt(num_blocks);
int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( magmaDoubleComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel_32)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel_16)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel_8)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", A.alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue ,
A.num_rows, dz, dd, d1 );
}
// multi-pass reduction of the per-block partial sums in d1/d2 down to a
// single value; aux1 always points at the buffer holding the latest pass
// (Gs_next.x is forced to >= 2 so the halved launch size stays >= 1)
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// store the dot product d.z into skp[4], then update rho/gamma on device
magma_zcopyvector( 1, aux1, 1, skp+4, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zcg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
// Fused CG update: x += rho*d, r -= rho*z (rho read from skp[3]), plus the
// first reduction pass of the dot product over r[i]*r[i] (unconjugated
// product as written). One partial sum per block is stored in vtmp.
// Requires blockDim.x == 256 (first reduction step reads temp[Idx+128])
// and blockDim.x * sizeof(magmaDoubleComplex) bytes of shared memory.
__global__ void
magma_zcgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
magmaDoubleComplex rho = skp[3];
magmaDoubleComplex mrho = MAGMA_Z_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
if( i<n ){
x[i] += rho * d[i] ;
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction: the complex path uses barriers inside a divergent
// branch, the real paths rely on volatile warp-synchronous accesses —
// NOTE(review): both are pre-Volta idioms; confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
// Single-thread scalar update: skp[0] = skp[1] / skp[2]  (alpha = beta / gamma).
__global__ void //alpha = beta / gamma
magma_zcg_alphabetakernel(
magmaDoubleComplex_ptr skp )
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid != 0 )
        return;                       // only thread 0 performs the update
    const magmaDoubleComplex beta = skp[ 1 ];
    skp[ 0 ] = beta / skp[ 2 ];       // alpha
}
// update search Krylov vector d
// Elementwise update of the CG search direction: d = r + alpha * d,
// with alpha read from skp[0]. One thread per vector entry.
__global__ void
magma_zcg_d_kernel(
int n,
magmaDoubleComplex_ptr skp,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr d )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;                              // tail guard
    const magmaDoubleComplex alpha = skp[ 0 ];
    d[ row ] = r[ row ] + alpha * d[ row ];
}
/**
Purpose
-------
Merges the update of r and x with the dot product and then performs
the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in,out]
dx magmaDoubleComplex_ptr
input/output vector x
@param[in,out]
dr magmaDoubleComplex_ptr
input/output vector r
@param[in]
dd magmaDoubleComplex_ptr
input vector d
@param[in]
dz magmaDoubleComplex_ptr
input vector z
@param[in]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zsygpuk
********************************************************************/
extern "C" magma_int_t
magma_zcgmerge_xrbeta(
int n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dd,
magmaDoubleComplex_ptr dz,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
// one thread per vector entry; Ms bytes of shared memory per block
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
// d1/d2 are ping-pong buffers for the multi-pass reduction
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
// fused x/r update plus first dot-product reduction pass into d1.
// NOTE(review): this launch uses the default stream (0), unlike
// magma_zcgmerge_spmv1 which passes `queue` — confirm this is intended.
hipLaunchKernelGGL(( magma_zcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0,
n, dx, dr, dd, dz, skp, d1);
// reduce the per-block partial sums down to a single value in aux1
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// store the new residual norm (beta) in skp[1], then update alpha and
// finally the search direction d = r + alpha*d
magma_zcopyvector( 1, aux1, 1, skp+1, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
dim3 Bs3( local_block_size );
dim3 Gs3( (n+local_block_size-1)/local_block_size );
hipLaunchKernelGGL(( magma_zcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, dr, dd );
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| ac8bd2fbe447892d88d4690ae18c69ea32ac4f8c.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
// accelerated reduction for one vector
// One pass of a grid-wide sum reduction: folds the Gs partial sums in vtmp
// into one partial sum per block, written to vtmp2. Each thread accumulates
// two elements per grid-stride iteration. Assumes blockDim.x == 128
// (blockSize is hard-coded) and blockDim.x * sizeof(magmaDoubleComplex)
// bytes of dynamic shared memory. `n` is unused here.
__global__ void
magma_zcgreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex_ptr vtmp,
magmaDoubleComplex_ptr vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
// grid-stride accumulation of pairs of inputs
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction: complex path uses barriers in a divergent branch,
// real paths rely on volatile warp-synchronous accesses —
// NOTE(review): pre-Volta idioms; confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using CSR and the first step of the reduction
// Fused CSR SpMV (z = A*d) plus the first reduction pass of the dot
// product d.z; one partial sum per block is written to vtmp.
// Requires blockDim.x == 256 (first reduction step reads temp[Idx+128])
// and blockDim.x * sizeof(magmaDoubleComplex) bytes of shared memory.
__global__ void
magma_zcgmerge_spmvcsr_kernel(
int n,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
if( i<n ){
// one thread per row: accumulate the row's nonzeros
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction — NOTE(review): pre-Volta idioms (divergent
// barriers / volatile warp-sync); confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
// Fused ELL SpMV (column-major slot layout: element k of row i is at
// dval[n*k + i]) plus the first reduction pass of the dot product d.z;
// one partial sum per block is written to vtmp.
// Requires blockDim.x == 256 and blockDim.x * sizeof(magmaDoubleComplex)
// bytes of shared memory.
__global__ void
magma_zcgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
if(i < n ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row ; k ++){
int col = dcolind [ n * k + i ];
magmaDoubleComplex val = dval [ n * k + i ];
if( val != 0)
// skip explicit-zero padding entries
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction — NOTE(review): pre-Volta idioms (divergent
// barriers / volatile warp-sync); confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
// Fused ELLPACK SpMV (row-major slot layout: element k of row i is at
// dval[num_cols_per_row*i + k]) plus the first reduction pass of the dot
// product d.z; one partial sum per block is written to vtmp.
// Requires blockDim.x == 256 and blockDim.x * sizeof(magmaDoubleComplex)
// bytes of shared memory.
__global__ void
magma_zcgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
if(i < n ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row ; k ++){
int col = dcolind [ num_cols_per_row * i + k ];
magmaDoubleComplex val = dval [ num_cols_per_row * i + k ];
if( val != 0)
// skip explicit-zero padding entries
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction — NOTE(review): pre-Volta idioms (divergent
// barriers / volatile warp-sync); confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
// ELLRT SpMV with T = 8 threads per row; writes only z = A*d. The dot
// product is NOT computed here (vtmp is unused) — it is done afterwards by
// magma_zcgmerge_spmvellpackrt_kernel2.
// NOTE(review): the shared-memory reduction below has no __syncthreads()
// and relies on warp-synchronous execution of the T threads of a row —
// a pre-Volta assumption; confirm on newer architectures.
__global__ void
magma_zcgmerge_spmvellpackrt_kernel_8(
int n,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if(i < n ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = (drowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
// warp-synchronous tree reduction of the T = 8 partial sums
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT 8 threads per row
// ELLRT SpMV with T = 16 threads per row; writes only z = A*d (vtmp is
// unused — the dot product is done by magma_zcgmerge_spmvellpackrt_kernel2).
// NOTE(review): the reduction has no __syncthreads() and relies on
// warp-synchronous execution (T <= warp size) — a pre-Volta assumption.
__global__ void
magma_zcgmerge_spmvellpackrt_kernel_16(
int n,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if(i < n ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = (drowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
// warp-synchronous tree reduction of the T = 16 partial sums
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT 8 threads per row
// ELLRT SpMV with T = 32 threads per row; writes only z = A*d (vtmp is
// unused — the dot product is done by magma_zcgmerge_spmvellpackrt_kernel2).
// NOTE(review): the reduction has no __syncthreads() and relies on
// warp-synchronous execution (T == warp size) — a pre-Volta assumption.
__global__ void
magma_zcgmerge_spmvellpackrt_kernel_32(
int n,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowlength,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ magmaDoubleComplex shared[];
if(i < n ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int max_ = (drowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
// warp-synchronous tree reduction of the T = 32 partial sums
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
// First reduction pass of the dot product z.d, used after SpMV kernels
// that cannot fuse the dot product (SELLP / ELLRT). One partial sum per
// block is written to vtmp2. Requires blockDim.x == 256 and
// blockDim.x * sizeof(magmaDoubleComplex) bytes of shared memory.
__global__ void
magma_zcgmerge_spmvellpackrt_kernel2(
int n,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_Z_MAKE(0.0, 0.0);
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction — NOTE(review): pre-Volta idioms (divergent
// barriers / volatile warp-sync); confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
// Fused SELL-C SpMV (one thread per row; slice width given by blocksize,
// slice offsets in drowptr, indexed per blockIdx.x) plus the first
// reduction pass of the dot product d.z; one partial sum per block is
// written to vtmp. Requires blockDim.x == 256 and
// blockDim.x * sizeof(magmaDoubleComplex) bytes of shared memory.
// NOTE(review): the slice layout indexes drowptr by blockIdx.x, which ties
// the launch to blockDim.x == blocksize per slice — confirm against caller.
__global__ void
magma_zcgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z,
magmaDoubleComplex_ptr vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ]; // start of this slice in dval/dcolind
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; // slice width
temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);
if(i < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++){
int col = dcolind [offset+ blocksize * n + Idx ];
magmaDoubleComplex val = dval[offset+ blocksize * n + Idx];
if( val != 0){
// skip explicit-zero padding entries
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
// block-level tree reduction of temp[] into temp[0]
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
// last-warp reduction — NOTE(review): pre-Volta idioms (divergent
// barriers / volatile warp-sync); confirm on newer architectures.
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 8: a 2D thread block handles `blocksize`
// rows (threadIdx.x) with T threads per row (threadIdx.y); per-thread
// partial sums are combined in dynamic shared memory
// (blocksize * T elements of magmaDoubleComplex required).
__global__ void
magma_zcgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ]; // start of this block's slice in dval/dcolind
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
// tree reduction over the T = 8 partial sums of each row.
// NOTE(review): the barriers below (and the one above) execute inside a
// divergent branch; this is the historical MAGMA idiom and relies on
// pre-Volta scheduling behavior — confirm on newer architectures.
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// SELL-P SpMV, alignment T = 16: identical structure to the T = 8 variant,
// with one extra reduction step. Requires blocksize * T shared-memory
// elements of magmaDoubleComplex.
__global__ void
magma_zcgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr d,
magmaDoubleComplex_ptr z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ]; // start of this block's slice in dval/dcolind
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
// tree reduction over the T = 16 partial sums of each row.
// NOTE(review): barriers inside divergent branches — historical MAGMA
// idiom; confirm behavior on post-Volta scheduling.
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
// Cooperative SELLP SpMV with T = 32 threads per row.
// Launch shape (see magma_zcgmerge_spmv1): blockDim = (blocksize, T),
// dynamic shared memory >= blocksize*T magmaDoubleComplex values.
__global__ void
magma_zcgmerge_spmvsellpt_kernel_32(
    int num_rows,                    // number of matrix rows
    int blocksize,                   // SELLP slice height (rows per block)
    int T,                           // threads cooperating per row (32 here)
    magmaDoubleComplex_ptr dval,     // packed nonzero values
    magmaIndex_ptr dcolind,          // column indices of the nonzeros
    magmaIndex_ptr drowptr,          // per-slice offsets into dval/dcolind
    magmaDoubleComplex_ptr d,        // input vector
    magmaDoubleComplex_ptr z)        // output vector z = A*d
{
    // T threads assigned to each row
    int idx = threadIdx.y;                          // thread (lane) within the row
    int idy = threadIdx.x;                          // local row inside the slice
    int ldx = idx * blocksize + idy;                // linear thread id = shared-mem slot
    int bdx = blockIdx.y * gridDim.x + blockIdx.x;  // global block (slice) index
    int row = bdx * blocksize + idy;                // global row index
    extern __shared__ magmaDoubleComplex shared[];

    if (row < num_rows) {
        magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
        int offset = drowptr[ bdx ];
        int block = blocksize * T;   // total number of threads
        int max_ = (drowptr[ bdx+1 ] - offset) / block;
        // number of elements each thread handles
        for (int k = 0; k < max_; k++) {
            magmaDoubleComplex val =
                dval[ offset + ldx + block*k ];
            int col =
                dcolind[ offset + ldx + block*k ];
            dot += val * d[ col ];
        }
        shared[ldx] = dot;
        __syncthreads();
        // Tree reduction of the T=32 partial sums per row.
        // NOTE(review): __syncthreads() inside divergent branches mirrors
        // the original MAGMA code; formally undefined unless the branch is
        // block-uniform -- confirm before restructuring.
        if (idx < 16) {
            shared[ldx] += shared[ldx+blocksize*16];
            __syncthreads();
            if (idx < 8) shared[ldx] += shared[ldx+blocksize*8];
            __syncthreads();
            if (idx < 4) shared[ldx] += shared[ldx+blocksize*4];
            __syncthreads();
            if (idx < 2) shared[ldx] += shared[ldx+blocksize*2];
            __syncthreads();
            if (idx == 0) {
                z[row] =
                    (shared[ldx]+shared[ldx+blocksize*1]);
            }
        }
    }
}
// kernel to handle scalars
// Scalar bookkeeping for the merged CG iteration: rho = beta/tmp, gamma = beta.
// Reads skp[1] (beta) and skp[4] (tmp); writes skp[3] (rho) and skp[2] (gamma).
// Launched with a tiny grid; only the very first thread does any work.
__global__ void
magma_zcg_rhokernel(
    magmaDoubleComplex_ptr skp ){
    if ( blockIdx.x * blockDim.x + threadIdx.x == 0 ) {
        magmaDoubleComplex beta = skp[1];
        skp[3] = beta / skp[4];
        skp[2] = beta;
    }
}
/**
Purpose
-------
Merges the first SpmV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_z_sparse_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dd magmaDoubleComplex_ptr
input vector d
@param[out]
dz magmaDoubleComplex_ptr
input vector z
@param[out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
// Host driver: launches the format-specific SpMV computing dz = A*dd,
// reduces the blockwise partial dot products (written into d1 by the SpMV
// kernels) down to a single value in skp[4], then launches
// magma_zcg_rhokernel to update the CG scalars in skp.
extern "C" magma_int_t
magma_zcgmerge_spmv1(
    magma_z_sparse_matrix A,
    magmaDoubleComplex_ptr d1,
    magmaDoubleComplex_ptr d2,
    magmaDoubleComplex_ptr dd,
    magmaDoubleComplex_ptr dz,
    magmaDoubleComplex_ptr skp,
    magma_queue_t queue )
{
    // set queue for old dense routines
    magma_queue_t orig_queue;
    magmablasGetKernelStream( &orig_queue );

    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size );
    dim3 Gs_next;
    int Ms = local_block_size * sizeof( magmaDoubleComplex );  // shared mem per block
    magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;               // ping-pong reduction buffers
    int b = 1;

    // Dispatch on the sparse storage format.
    if ( A.storage_type == Magma_CSR )
        magma_zcgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, queue >>>
        ( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
    else if ( A.storage_type == Magma_ELLPACKT )
        magma_zcgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, queue >>>
        ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
    else if ( A.storage_type == Magma_ELL )
        magma_zcgmerge_spmvell_kernel<<<Gs, Bs, Ms, queue >>>
        ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
    else if ( A.storage_type == Magma_SELLP ) {
        // 2D thread block: blocksize local rows x alignment threads per row.
        int num_threadssellp = A.blocksize*A.alignment;
        magma_int_t arch = magma_getdevice_arch();
        if ( arch < 200 && num_threadssellp > 256 )
            printf("error: too much shared memory requested.\n");

        dim3 block( A.blocksize, A.alignment, 1);
        // Near-square 2D grid to stay within per-dimension grid limits.
        int dimgrid1 = sqrt(A.numblocks);
        int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1;

        dim3 gridsellp( dimgrid1, dimgrid2, 1);
        int Mssellp = num_threadssellp * sizeof( magmaDoubleComplex );

        if ( A.alignment == 8)
            magma_zcgmerge_spmvsellpt_kernel_8
            <<< gridsellp, block, Mssellp, queue >>>
            ( A.num_rows, A.blocksize, A.alignment,
              A.dval, A.dcol, A.drow, dd, dz);
        else if ( A.alignment == 16)
            magma_zcgmerge_spmvsellpt_kernel_16
            <<< gridsellp, block, Mssellp, queue >>>
            ( A.num_rows, A.blocksize, A.alignment,
              A.dval, A.dcol, A.drow, dd, dz);
        else if ( A.alignment == 32)
            magma_zcgmerge_spmvsellpt_kernel_32
            <<< gridsellp, block, Mssellp, queue >>>
            ( A.num_rows, A.blocksize, A.alignment,
              A.dval, A.dcol, A.drow, dd, dz);
        else
            printf("error: alignment not supported.\n");

        // in case of using SELLP, we can't efficiently merge the
        // dot product and the first reduction loop into the SpMV kernel
        // as the SpMV grid would result in low occupancy.
        magma_zcgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>>
            ( A.num_rows, dz, dd, d1 );
    }
    else if ( A.storage_type == Magma_ELLRT ) {
        // in case of using ELLRT, we need a different grid, assigning
        // threads_per_row processors to each row
        // the block size is num_threads
        // fixed values
        int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize);
        int num_threads = A.alignment*A.blocksize;
        int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment)
            *A.alignment;

        magma_int_t arch = magma_getdevice_arch();
        if ( arch < 200 && num_threads > 256 )
            printf("error: too much shared memory requested.\n");

        int dimgrid1 = sqrt(num_blocks);
        int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
        dim3 gridellrt( dimgrid1, dimgrid2, 1);

        int Mellrt = A.alignment * A.blocksize * sizeof( magmaDoubleComplex );
        // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);

        if ( A.alignment == 32 ) {
            magma_zcgmerge_spmvellpackrt_kernel_32
                <<< gridellrt, num_threads , Mellrt, queue >>>
                ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
                  A.alignment, real_row_length );
        }
        else if ( A.alignment == 16 ) {
            magma_zcgmerge_spmvellpackrt_kernel_16
                <<< gridellrt, num_threads , Mellrt, queue >>>
                ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
                  A.alignment, real_row_length );
        }
        else if ( A.alignment == 8 ) {
            magma_zcgmerge_spmvellpackrt_kernel_8
                <<< gridellrt, num_threads , Mellrt, queue >>>
                ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
                  A.alignment, real_row_length );
        }
        else {
            printf("error: alignment %d not supported.\n", A.alignment);
            return MAGMA_ERR_NOT_SUPPORTED;
        }
        // in case of using ELLRT, we can't efficiently merge the
        // dot product and the first reduction loop into the SpMV kernel
        // as the SpMV grid would result in low occupancy.
        magma_zcgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>>
            ( A.num_rows, dz, dd, d1 );
    }

    // Tree-reduce the per-block partial sums, ping-ponging between d1/d2
    // until a single value remains in aux1.
    while( Gs.x > 1 ) {
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        magma_zcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
            ( Gs.x, A.num_rows, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    // Reduced dot product lands in skp[4]; rho/gamma are then updated on device.
    magma_zcopyvector( 1, aux1, 1, skp+4, 1 );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    magma_zcg_rhokernel<<<Gs2, Bs2, 0>>>( skp );

    magmablasSetKernelStream( orig_queue );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
// Merged CG update: x += rho*d, r -= rho*z, plus the first stage of the
// dot product of the updated residual (partial sum per block written to
// vtmp[blockIdx.x]). The reduction is hard-coded for blockDim.x == 256.
// NOTE(review): the partial sum is r[i]*r[i] with no conjugation -- in the
// complex precisions this is r.r, not <r,r>; matches the original code.
__global__ void
magma_zcgmerge_xrbeta_kernel(
    int n,
    magmaDoubleComplex_ptr x,
    magmaDoubleComplex_ptr r,
    magmaDoubleComplex_ptr d,
    magmaDoubleComplex_ptr z,
    magmaDoubleComplex_ptr skp,
    magmaDoubleComplex_ptr vtmp )
{
    extern __shared__ magmaDoubleComplex temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;

    magmaDoubleComplex rho = skp[3];
    magmaDoubleComplex mrho = MAGMA_Z_MAKE( -1.0, 0.0)*rho;  // -rho

    temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0);  // zero-pad out-of-range lanes

    if( i<n ){
        x[i] += rho * d[i] ;
        r[i] += mrho * z[i];
        temp[ Idx ] = r[i] * r[i];
    }
    __syncthreads();
    // Tree reduction over the 256 shared-memory partial sums.
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();

#if defined(PRECISION_z) || defined(PRECISION_c)
    // Complex precisions: finish with explicitly synchronized steps
    // (the volatile warp-synchronous trick below only works for scalars).
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Real double: warp-synchronous final reduction via volatile accesses.
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    // Real single: warp-synchronous final reduction via volatile accesses.
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif

    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// kernel to handle scalars
// Scalar bookkeeping for the merged CG iteration: alpha = beta / gamma.
// Reads skp[1] (beta) and skp[2] (gamma); writes skp[0] (alpha).
// Launched with a tiny grid; only the very first thread does any work.
__global__ void
magma_zcg_alphabetakernel(
    magmaDoubleComplex_ptr skp )
{
    if ( blockIdx.x * blockDim.x + threadIdx.x == 0 ) {
        skp[0] = skp[1] / skp[2];
    }
}
// update search Krylov vector d
// Search-direction update for CG: d = r + alpha * d (elementwise),
// where alpha is read from skp[0] on the device.
__global__ void
magma_zcg_d_kernel(
    int n,
    magmaDoubleComplex_ptr skp,
    magmaDoubleComplex_ptr r,
    magmaDoubleComplex_ptr d )
{
    magmaDoubleComplex alpha = skp[0];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < n ) {
        d[tid] = r[tid] + alpha * d[tid];
    }
}
/**
Purpose
-------
Merges the update of r and x with the dot product and performs then
the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in,out]
dx magmaDoubleComplex_ptr
input/output vector x
@param[in,out]
dr magmaDoubleComplex_ptr
input/output vector r
@param[in]
dd magmaDoubleComplex_ptr
input vector d
@param[in]
dz magmaDoubleComplex_ptr
input vector z
@param[in]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zsygpuk
********************************************************************/
// Host driver: launches the merged x/r update + first-stage dot product,
// tree-reduces the per-block partial sums into skp[1] (beta), updates
// alpha on the device, and finally refreshes the search direction d.
extern "C" magma_int_t
magma_zcgmerge_xrbeta(
    int n,
    magmaDoubleComplex_ptr d1,
    magmaDoubleComplex_ptr d2,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex_ptr dr,
    magmaDoubleComplex_ptr dd,
    magmaDoubleComplex_ptr dz,
    magmaDoubleComplex_ptr skp,
    magma_queue_t queue )
{
    // set queue for old dense routines
    magma_queue_t orig_queue;
    magmablasGetKernelStream( &orig_queue );

    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( (n+local_block_size-1)/local_block_size );
    dim3 Gs_next;
    int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );  // shared mem per block
    magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;                 // ping-pong buffers
    int b = 1;

    magma_zcgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>>
        ( n, dx, dr, dd, dz, skp, d1);

    // Tree-reduce the per-block partial sums down to a single value in aux1.
    while( Gs.x > 1 ) {
        Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
        if ( Gs_next.x == 1 ) Gs_next.x = 2;
        magma_zcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
            ( Gs.x, n, aux1, aux2 );
        Gs_next.x = Gs_next.x /2;
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }

    // beta lands in skp[1]; alpha = beta/gamma is then formed on device.
    magma_zcopyvector( 1, aux1, 1, skp+1, 1 );
    dim3 Bs2( 2 );
    dim3 Gs2( 1 );
    magma_zcg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp );

    dim3 Bs3( local_block_size );
    dim3 Gs3( (n+local_block_size-1)/local_block_size );
    magma_zcg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, dr, dd );

    magmablasSetKernelStream( orig_queue );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
80208d2a1ba73e8bd730dcf58a7bd0c254526025.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void kernelFunction(int *input)
{
input[threadIdx.x] = 32 - threadIdx.x;
} | 80208d2a1ba73e8bd730dcf58a7bd0c254526025.cu | extern "C" __global__ void kernelFunction(int *input)
{
input[threadIdx.x] = 32 - threadIdx.x;
} |
ef69adc43c76e3781f0e7ee7c7d019dd7ffabf9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <npp.h>
#include <math.h>
#include "cufft_error.h"
#define THREADS_PER_BLOCK 512
/**Taken from common.h from GPGPU Workshop code**/
#define CUDA_CALL(x,s) { hipError_t rc = ( x ); if (rc != hipSuccess) { \
printf("%s (%s) at %s:%d\n", s, hipGetErrorString(rc), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
}}
#define CUFFT_CALL(x,s) { hipfftResult_t rc = ( x ); if (rc != HIPFFT_SUCCESS ) { \
printf("%s (%s) at %s:%d\n", s, cufftGetErrorString(rc), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
}}
/**
 * Fill kern (length lenFFT) with a boxcar (moving-average) filter laid out
 * in FFT wrap-around order: filterWidth/2+1 unit taps at the start of the
 * array, the remaining taps wrapped to the end, all other entries zero.
 *
 * kern        output array of lenFFT floats (host memory)
 * filterWidth number of unit taps; assumed <= lenFFT
 * lenFFT      transform length
 */
void create_boxcar_kernel(float *kern, int filterWidth, int lenFFT)
{
    printf("Filter width: %d\n", filterWidth);
    // Write directly into the caller's buffer; the original staged through
    // an unchecked malloc + memcpy + free for no benefit.
    for (int jj = 0; jj < lenFFT; jj++)
    {
        if (jj < filterWidth/2 + 1)
        {
            // leading taps, including the center sample
            kern[jj] = 1.0f;
        }
        else if (jj > (lenFFT - filterWidth/2) && filterWidth%2 == 0 && filterWidth > 2)
        {
            // wrapped trailing taps (even filter width)
            kern[jj] = 1.0f;
        }
        else if (jj > (lenFFT - filterWidth/2 - 1) && filterWidth%2 == 1)
        {
            // wrapped trailing taps (odd filter width)
            kern[jj] = 1.0f;
        }
        else
        {
            kern[jj] = 0.0f;
        }
    }
}
// Pointwise complex product: fft_out[i] = fft_a[i] * fft_b[i].
// One thread per element; extra threads past fftlen exit immediately.
__global__ void complexMultiply(hipfftComplex *fft_a, hipfftComplex *fft_b, hipfftComplex *fft_out, int fftlen)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= fftlen)
        return;
    fft_out[idx].x = fft_a[idx].x * fft_b[idx].x - fft_a[idx].y * fft_b[idx].y;
    fft_out[idx].y = fft_a[idx].y * fft_b[idx].x + fft_a[idx].x * fft_b[idx].y;
}
/**
 * FFT-domain convolution of d_data with d_box (length fftlen each).
 * Forward R2C transforms run in place on d_box and d_data; the pointwise
 * product is written to d_conv and inverse-transformed (C2R, in place).
 * The result is un-normalized, as usual for hipFFT/cuFFT. All pointers
 * are device memory.
 */
void convolve(hipfftComplex *d_data, hipfftComplex *d_box, int fftlen, hipfftComplex *d_conv)
{
    hipEvent_t start, stop;
    float RunTime;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // Forward transforms (in place, R2C).
    // BUGFIX: start was previously recorded *after* the FFTs had already
    // executed (immediately before stop), so the reported forward-FFT
    // time was always ~0. Record it before the work instead.
    hipEventRecord(start, 0);
    hipfftHandle plan;
    CUFFT_CALL(hipfftPlan1d(&plan, fftlen, HIPFFT_R2C, 1),
               "Error generating plan for forward FFT");
    CUFFT_CALL(hipfftExecR2C(plan, (hipfftReal *) d_box, d_box),
               "Error calculating forward FFT of filter kernel");
    CUFFT_CALL(hipfftExecR2C(plan, (hipfftReal *) d_data, d_data),
               "Error calculating forward FFT of TS data");
    CUFFT_CALL(hipfftDestroy(plan), "Error destroying plan for forward FFT");
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&RunTime, start, stop);
    printf("FFT length: %d \n", fftlen);
    printf("Configuring and launching the forward FFTs took %f ms \n", RunTime);

    // Pointwise multiply in the frequency domain.
    hipEventRecord(start, 0);
    int blocksPerGrid = (fftlen + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    hipLaunchKernelGGL(( complexMultiply) , dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK) , 0, 0, d_box, d_data, d_conv, fftlen);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&RunTime, start, stop);
    printf("The complex multiply took %f ms using %d threads on %d blocks\n", RunTime, THREADS_PER_BLOCK, blocksPerGrid);

    // Inverse transform (C2R, in place on d_conv).
    hipEventRecord(start, 0);
    CUFFT_CALL(hipfftPlan1d(&plan, fftlen, HIPFFT_C2R, 1),
               "Error generating plan for reverse FFT ");
    CUFFT_CALL(hipfftExecC2R(plan, d_conv, (hipfftReal *) d_conv),
               "Error calculating reverse FFT");
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&RunTime, start, stop);
    printf("Configuring and launching the reverse FFT took %f ms \n", RunTime);
    CUFFT_CALL(hipfftDestroy(plan), "Error destroying reverse FFT plan");

    // BUGFIX: the events were previously leaked.
    hipEventDestroy(start);
    hipEventDestroy(stop);
}
/**
 * Compute the mean and standard deviation of a device float array with NPP.
 *
 * indata  device pointer to `size` floats
 * size    number of elements
 * tsmean  host output: the mean (one float)
 * tsstd   host output: the standard deviation (one float)
 */
void calc_stats(float *indata, int size, float *tsmean, float *tsstd)
{
    NppStatus npp_status;

    // Determine the scratch-buffer size required by the NPP reduction.
    int BuffSize;
    nppsMeanStdDevGetBufferSize_32f(size, &BuffSize);
    Npp8u *pDevBuff;
    float *d_mean, *d_std;
    float mtmp[1]={-2.0};
    float stmp[1]={-2.0};
    // BUGFIX: the original passed the arrays (pointers) to %f, which is
    // undefined behavior; print the sentinel values instead.
    printf("M & S inside: %f %f\n", mtmp[0], stmp[0]);

    CUDA_CALL(hipMalloc((void **) &d_mean, sizeof(float)), "Allocate device float");
    CUDA_CALL(hipMalloc((void **) &d_std, sizeof(float)), "Allocate device float");
    CUDA_CALL(hipMemcpy(d_mean, mtmp, sizeof(float), hipMemcpyHostToDevice), "CPY");
    CUDA_CALL(hipMemcpy(d_std, stmp, sizeof(float), hipMemcpyHostToDevice), "CPY");
    printf("Buff size: %d\n", BuffSize);

    // Allocate scratch buffer
    CUDA_CALL(hipMalloc((void **) &pDevBuff, BuffSize),
              "Failure allocating scratch buffer for stats calc.");

    // Calc stats
    npp_status=nppsMeanStdDev_32f( (Npp32f *) indata, size, d_mean, d_std, pDevBuff);
    printf("Status: %d\n", npp_status);
    CUDA_CALL(hipMemcpy(tsmean, d_mean, sizeof(float), hipMemcpyDeviceToHost),
              "Copy mean back");
    CUDA_CALL(hipMemcpy(tsstd, d_std, sizeof(float), hipMemcpyDeviceToHost),
              "Copy stddev back");
    // BUGFIX: dereference the outputs instead of printing the pointers.
    printf("M & S inside: %f %f\n", tsmean[0], tsstd[0]);

    // Free memory
    CUDA_CALL(hipFree(d_mean), "Freeing mean array");
    CUDA_CALL(hipFree(d_std), "Freeing stddev array");
    CUDA_CALL(hipFree(pDevBuff), "Freeing buffer memory");
}
extern "C" {
/**
 * Convolve a host time series with a boxcar filter via FFT, then report
 * the mean/stddev of the device result.
 *
 * indata  host input, fftlen floats
 * width   boxcar filter width in samples
 * fftlen  transform length (also the data length)
 * outdata host output, fftlen floats (un-normalized inverse FFT)
 */
void ts_convolve(float *indata, int width, int fftlen, float *outdata)
{
    size_t size = fftlen*sizeof(float);
    // (Removed two unused, leaked timing events from the original.)
    float *boxout = (float *)malloc(size);

    // Print some device properties
    hipDeviceProp_t dev_prop;
    CUDA_CALL(hipGetDeviceProperties(&dev_prop, 0), "Error getting device properties");
    printf("******** DEVICE PROPERTIES ********\n");
    printf("Device name: %s\n", dev_prop.name);
    printf("Maximum Threads per Block: %d\n", dev_prop.maxThreadsPerBlock);
    printf("Maximum Dimensions of a Block: %d %d %d\n", dev_prop.maxThreadsDim[0],
           dev_prop.maxThreadsDim[1], dev_prop.maxThreadsDim[2]);
    printf("Maximum Dimensions of a Grid: %d %d %d\n",
           dev_prop.maxGridSize[0], dev_prop.maxGridSize[1], dev_prop.maxGridSize[2]);
    printf("Warp size in threads: %d\n", dev_prop.warpSize);
    // BUGFIX: sharedMemPerBlock is size_t; %d is the wrong specifier on LP64.
    printf("Shared Memory per Block in Bytes: %zu\n", (size_t)dev_prop.sharedMemPerBlock);
    printf("***********************************\n\n");

    // Create boxcar kernel array
    create_boxcar_kernel(boxout, width, fftlen);

    // Copy kernel to GPU (eventually loop over several matched filter kernels)
    hipfftComplex *d_box = NULL;
    hipfftComplex *d_data = NULL;
    hipfftComplex *d_conv = NULL;
    // Complex-sized buffers so the in-place R2C/C2R transforms fit.
    int compSize = sizeof(hipfftComplex)*fftlen;
    CUDA_CALL(hipMalloc((void **)&d_box, compSize), "Error allocating array d_box");
    CUDA_CALL(hipMalloc((void **)&d_data, compSize), "Error allocating array d_data");
    CUDA_CALL(hipMalloc((void **)&d_conv, compSize), "Error allocating array d_conv");
    CUDA_CALL(hipMemcpy(d_box, boxout, size, hipMemcpyHostToDevice),
              "Error copying filter kernel data to GPU");
    CUDA_CALL(hipMemcpy(d_data, indata, size, hipMemcpyHostToDevice),
              "Error TS data to GPU");

    // Calculate convolution
    convolve(d_data, d_box, fftlen, d_conv);

    // Copy back
    CUDA_CALL(hipMemcpy(outdata, d_conv, size, hipMemcpyDeviceToHost),
              "Error copying convolved TS from GPU");

    free(boxout);
    CUDA_CALL(hipFree(d_box), "Error freeing d_box");
    CUDA_CALL(hipFree(d_data), "Error freeing d_data");

    // Calc stats on the device result before releasing it
    float tsmean[1]={0.0};
    float tsstd[1]={0.0};
    calc_stats((float *) d_conv, fftlen, tsmean, tsstd);
    printf("Mean and Std Dev: %f %f\n", tsmean[0], tsstd[0]);
    CUDA_CALL(hipFree(d_conv), "Error freeing d_conv");
}
}
| ef69adc43c76e3781f0e7ee7c7d019dd7ffabf9a.cu | #include <stdio.h>
#include <cuda.h>
#include <cufft.h>
#include <npp.h>
#include <math.h>
#include "cufft_error.h"
#define THREADS_PER_BLOCK 512
/**Taken from common.h from GPGPU Workshop code**/
#define CUDA_CALL(x,s) { cudaError_t rc = ( x ); if (rc != cudaSuccess) { \
printf("%s (%s) at %s:%d\n", s, cudaGetErrorString(rc), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
}}
#define CUFFT_CALL(x,s) { cufftResult_t rc = ( x ); if (rc != CUFFT_SUCCESS ) { \
printf("%s (%s) at %s:%d\n", s, cufftGetErrorString(rc), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
}}
/**
 * Fill kern (length lenFFT) with a boxcar (moving-average) filter laid out
 * in FFT wrap-around order: filterWidth/2+1 unit taps at the start of the
 * array, the remaining taps wrapped to the end, all other entries zero.
 *
 * kern        output array of lenFFT floats (host memory)
 * filterWidth number of unit taps; assumed <= lenFFT
 * lenFFT      transform length
 */
void create_boxcar_kernel(float *kern, int filterWidth, int lenFFT)
{
    printf("Filter width: %d\n", filterWidth);
    // Write directly into the caller's buffer; the original staged through
    // an unchecked malloc + memcpy + free for no benefit.
    for (int jj = 0; jj < lenFFT; jj++)
    {
        if (jj < filterWidth/2 + 1)
        {
            // leading taps, including the center sample
            kern[jj] = 1.0f;
        }
        else if (jj > (lenFFT - filterWidth/2) && filterWidth%2 == 0 && filterWidth > 2)
        {
            // wrapped trailing taps (even filter width)
            kern[jj] = 1.0f;
        }
        else if (jj > (lenFFT - filterWidth/2 - 1) && filterWidth%2 == 1)
        {
            // wrapped trailing taps (odd filter width)
            kern[jj] = 1.0f;
        }
        else
        {
            kern[jj] = 0.0f;
        }
    }
}
// Pointwise complex product: fft_out[i] = fft_a[i] * fft_b[i].
// One thread per element; extra threads past fftlen exit immediately.
__global__ void complexMultiply(cufftComplex *fft_a, cufftComplex *fft_b, cufftComplex *fft_out, int fftlen)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= fftlen)
        return;
    fft_out[idx].x = fft_a[idx].x * fft_b[idx].x - fft_a[idx].y * fft_b[idx].y;
    fft_out[idx].y = fft_a[idx].y * fft_b[idx].x + fft_a[idx].x * fft_b[idx].y;
}
/**
 * FFT-domain convolution of d_data with d_box (length fftlen each).
 * Forward R2C transforms run in place on d_box and d_data; the pointwise
 * product is written to d_conv and inverse-transformed (C2R, in place).
 * The result is un-normalized, as usual for cuFFT. All pointers are
 * device memory.
 */
void convolve(cufftComplex *d_data, cufftComplex *d_box, int fftlen, cufftComplex *d_conv)
{
    cudaEvent_t start, stop;
    float RunTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Forward transforms (in place, R2C).
    // BUGFIX: start was previously recorded *after* the FFTs had already
    // executed (immediately before stop), so the reported forward-FFT
    // time was always ~0. Record it before the work instead.
    cudaEventRecord(start, 0);
    cufftHandle plan;
    CUFFT_CALL(cufftPlan1d(&plan, fftlen, CUFFT_R2C, 1),
               "Error generating plan for forward FFT");
    CUFFT_CALL(cufftExecR2C(plan, (cufftReal *) d_box, d_box),
               "Error calculating forward FFT of filter kernel");
    CUFFT_CALL(cufftExecR2C(plan, (cufftReal *) d_data, d_data),
               "Error calculating forward FFT of TS data");
    CUFFT_CALL(cufftDestroy(plan), "Error destroying plan for forward FFT");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&RunTime, start, stop);
    printf("FFT length: %d \n", fftlen);
    printf("Configuring and launching the forward FFTs took %f ms \n", RunTime);

    // Pointwise multiply in the frequency domain.
    cudaEventRecord(start, 0);
    int blocksPerGrid = (fftlen + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    complexMultiply <<< blocksPerGrid, THREADS_PER_BLOCK >>> (d_box, d_data, d_conv, fftlen);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&RunTime, start, stop);
    printf("The complex multiply took %f ms using %d threads on %d blocks\n", RunTime, THREADS_PER_BLOCK, blocksPerGrid);

    // Inverse transform (C2R, in place on d_conv).
    cudaEventRecord(start, 0);
    CUFFT_CALL(cufftPlan1d(&plan, fftlen, CUFFT_C2R, 1),
               "Error generating plan for reverse FFT ");
    CUFFT_CALL(cufftExecC2R(plan, d_conv, (cufftReal *) d_conv),
               "Error calculating reverse FFT");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&RunTime, start, stop);
    printf("Configuring and launching the reverse FFT took %f ms \n", RunTime);
    CUFFT_CALL(cufftDestroy(plan), "Error destroying reverse FFT plan");

    // BUGFIX: the events were previously leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
/**
 * Compute the mean and standard deviation of a device float array with NPP.
 *
 * indata  device pointer to `size` floats
 * size    number of elements
 * tsmean  host output: the mean (one float)
 * tsstd   host output: the standard deviation (one float)
 */
void calc_stats(float *indata, int size, float *tsmean, float *tsstd)
{
    NppStatus npp_status;

    // Determine the scratch-buffer size required by the NPP reduction.
    int BuffSize;
    nppsMeanStdDevGetBufferSize_32f(size, &BuffSize);
    Npp8u *pDevBuff;
    float *d_mean, *d_std;
    float mtmp[1]={-2.0};
    float stmp[1]={-2.0};
    // BUGFIX: the original passed the arrays (pointers) to %f, which is
    // undefined behavior; print the sentinel values instead.
    printf("M & S inside: %f %f\n", mtmp[0], stmp[0]);

    CUDA_CALL(cudaMalloc((void **) &d_mean, sizeof(float)), "Allocate device float");
    CUDA_CALL(cudaMalloc((void **) &d_std, sizeof(float)), "Allocate device float");
    CUDA_CALL(cudaMemcpy(d_mean, mtmp, sizeof(float), cudaMemcpyHostToDevice), "CPY");
    CUDA_CALL(cudaMemcpy(d_std, stmp, sizeof(float), cudaMemcpyHostToDevice), "CPY");
    printf("Buff size: %d\n", BuffSize);

    // Allocate scratch buffer
    CUDA_CALL(cudaMalloc((void **) &pDevBuff, BuffSize),
              "Failure allocating scratch buffer for stats calc.");

    // Calc stats
    npp_status=nppsMeanStdDev_32f( (Npp32f *) indata, size, d_mean, d_std, pDevBuff);
    printf("Status: %d\n", npp_status);
    CUDA_CALL(cudaMemcpy(tsmean, d_mean, sizeof(float), cudaMemcpyDeviceToHost),
              "Copy mean back");
    CUDA_CALL(cudaMemcpy(tsstd, d_std, sizeof(float), cudaMemcpyDeviceToHost),
              "Copy stddev back");
    // BUGFIX: dereference the outputs instead of printing the pointers.
    printf("M & S inside: %f %f\n", tsmean[0], tsstd[0]);

    // Free memory
    CUDA_CALL(cudaFree(d_mean), "Freeing mean array");
    CUDA_CALL(cudaFree(d_std), "Freeing stddev array");
    CUDA_CALL(cudaFree(pDevBuff), "Freeing buffer memory");
}
extern "C" {
/**
 * Convolve a host time series with a boxcar filter via FFT, then report
 * the mean/stddev of the device result.
 *
 * indata  host input, fftlen floats
 * width   boxcar filter width in samples
 * fftlen  transform length (also the data length)
 * outdata host output, fftlen floats (un-normalized inverse FFT)
 */
void ts_convolve(float *indata, int width, int fftlen, float *outdata)
{
    size_t size = fftlen*sizeof(float);
    // (Removed two unused, leaked timing events from the original.)
    float *boxout = (float *)malloc(size);

    // Print some device properties
    cudaDeviceProp dev_prop;
    CUDA_CALL(cudaGetDeviceProperties(&dev_prop, 0), "Error getting device properties");
    printf("******** DEVICE PROPERTIES ********\n");
    printf("Device name: %s\n", dev_prop.name);
    printf("Maximum Threads per Block: %d\n", dev_prop.maxThreadsPerBlock);
    printf("Maximum Dimensions of a Block: %d %d %d\n", dev_prop.maxThreadsDim[0],
           dev_prop.maxThreadsDim[1], dev_prop.maxThreadsDim[2]);
    printf("Maximum Dimensions of a Grid: %d %d %d\n",
           dev_prop.maxGridSize[0], dev_prop.maxGridSize[1], dev_prop.maxGridSize[2]);
    printf("Warp size in threads: %d\n", dev_prop.warpSize);
    // BUGFIX: sharedMemPerBlock is size_t; %d is the wrong specifier on LP64.
    printf("Shared Memory per Block in Bytes: %zu\n", (size_t)dev_prop.sharedMemPerBlock);
    printf("***********************************\n\n");

    // Create boxcar kernel array
    create_boxcar_kernel(boxout, width, fftlen);

    // Copy kernel to GPU (eventually loop over several matched filter kernels)
    cufftComplex *d_box = NULL;
    cufftComplex *d_data = NULL;
    cufftComplex *d_conv = NULL;
    // Complex-sized buffers so the in-place R2C/C2R transforms fit.
    int compSize = sizeof(cufftComplex)*fftlen;
    CUDA_CALL(cudaMalloc((void **)&d_box, compSize), "Error allocating array d_box");
    CUDA_CALL(cudaMalloc((void **)&d_data, compSize), "Error allocating array d_data");
    CUDA_CALL(cudaMalloc((void **)&d_conv, compSize), "Error allocating array d_conv");
    CUDA_CALL(cudaMemcpy(d_box, boxout, size, cudaMemcpyHostToDevice),
              "Error copying filter kernel data to GPU");
    CUDA_CALL(cudaMemcpy(d_data, indata, size, cudaMemcpyHostToDevice),
              "Error TS data to GPU");

    // Calculate convolution
    convolve(d_data, d_box, fftlen, d_conv);

    // Copy back
    CUDA_CALL(cudaMemcpy(outdata, d_conv, size, cudaMemcpyDeviceToHost),
              "Error copying convolved TS from GPU");

    free(boxout);
    CUDA_CALL(cudaFree(d_box), "Error freeing d_box");
    CUDA_CALL(cudaFree(d_data), "Error freeing d_data");

    // Calc stats on the device result before releasing it
    float tsmean[1]={0.0};
    float tsstd[1]={0.0};
    calc_stats((float *) d_conv, fftlen, tsmean, tsstd);
    printf("Mean and Std Dev: %f %f\n", tsmean[0], tsstd[0]);
    CUDA_CALL(cudaFree(d_conv), "Error freeing d_conv");
}
}
0b94014ced5386390b0bf3e43b2b6ba8d2a066a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_ba1;
int xdim0_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim0_update_halo_kernel1_ba1;
int ydim0_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim1_update_halo_kernel1_ba1;
int xdim1_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim1_update_halo_kernel1_ba1;
int ydim1_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim2_update_halo_kernel1_ba1;
int xdim2_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim2_update_halo_kernel1_ba1;
int ydim2_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim3_update_halo_kernel1_ba1;
int xdim3_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim3_update_halo_kernel1_ba1;
int ydim3_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim4_update_halo_kernel1_ba1;
int xdim4_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim4_update_halo_kernel1_ba1;
int ydim4_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim5_update_halo_kernel1_ba1;
int xdim5_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim5_update_halo_kernel1_ba1;
int ydim5_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim6_update_halo_kernel1_ba1;
int xdim6_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim6_update_halo_kernel1_ba1;
int ydim6_update_halo_kernel1_ba1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_ba1*(y)+xdim0_update_halo_kernel1_ba1*ydim0_update_halo_kernel1_ba1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_ba1*(y)+xdim1_update_halo_kernel1_ba1*ydim1_update_halo_kernel1_ba1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_ba1*(y)+xdim2_update_halo_kernel1_ba1*ydim2_update_halo_kernel1_ba1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_ba1*(y)+xdim3_update_halo_kernel1_ba1*ydim3_update_halo_kernel1_ba1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_ba1*(y)+xdim4_update_halo_kernel1_ba1*ydim4_update_halo_kernel1_ba1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_ba1*(y)+xdim5_update_halo_kernel1_ba1*ydim5_update_halo_kernel1_ba1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_ba1*(y)+xdim6_update_halo_kernel1_ba1*ydim6_update_halo_kernel1_ba1*(z))
//user function
// User (elemental) function for a depth-1 halo update: for each field
// selected in fields[], copy the value from the adjacent plane at z+1
// into the current point. The "ba1" suffix and the +z source offset
// suggest this is the back z-boundary -- confirm against the host stub's
// range. OPS_ACCn macros map (x,y,z) offsets to the flat index of arg n.
__device__
inline void update_halo_kernel1_ba1_gpu(double *density0, double *density1,
                          double *energy0, double *energy1,
                          double *pressure, double *viscosity,
                          double *soundspeed , const int* fields) {
  if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,0,1)];
  if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,0,1)];
  if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,0,1)];
  if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,0,1)];
  if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,0,1)];
  if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,0,1)];
  if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,0,1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
// Auto-generated OPS wrapper kernel: one thread per (x,y,z) point of the
// iteration range. Each field pointer is advanced to this thread's element
// using the per-dataset xdim/ydim strides, then the user function is called.
// Threads outside the (size0, size1, size2) range do nothing.
__global__ void ops_update_halo_kernel1_ba1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,   // fields[] selection flags
int size0,
int size1,
int size2 ){

  // 3D global thread index over the iteration range
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  // Offset each base pointer to this thread's element (stride 1 in x).
  arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim0_update_halo_kernel1_ba1 * ydim0_update_halo_kernel1_ba1;
  arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim1_update_halo_kernel1_ba1 * ydim1_update_halo_kernel1_ba1;
  arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim2_update_halo_kernel1_ba1 * ydim2_update_halo_kernel1_ba1;
  arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim3_update_halo_kernel1_ba1 * ydim3_update_halo_kernel1_ba1;
  arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim4_update_halo_kernel1_ba1 * ydim4_update_halo_kernel1_ba1;
  arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim5_update_halo_kernel1_ba1 * ydim5_update_halo_kernel1_ba1;
  arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim6_update_halo_kernel1_ba1 * ydim6_update_halo_kernel1_ba1;

  // Guard against partial blocks at the edges of the range.
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel1_ba1_gpu(arg0, arg1, arg2, arg3,
                                arg4, arg5, arg6, arg7);
  }
}
// host stub function
#ifndef OPS_LAZY
// Host stub for the update_halo_kernel1_ba1 parallel loop (kernel index 20):
// computes the locally owned iteration range, refreshes the __constant__
// dataset pitches when they changed, stages the global-constant arg7 on the
// device, derives per-dataset base pointers and launches the kernel.
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
// Lazy-execution entry point: the same loop body, driven by a queued
// ops_kernel_descriptor instead of direct arguments.
void ops_par_loop_update_halo_kernel1_ba1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,20)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(20,"update_halo_kernel1_ba1");
OPS_kernels[20].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Under MPI, clamp the requested global range to the portion owned by this
// rank's sub-block (decomp_disp/decomp_size), extending at physical
// boundaries where there is no neighbouring rank (MPI_PROC_NULL).
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
// Upload dataset pitches to __constant__ memory only when they differ from
// the cached host-side (*_h) copies, avoiding redundant device traffic.
if (xdim0 != xdim0_update_halo_kernel1_ba1_h || ydim0 != ydim0_update_halo_kernel1_ba1_h || xdim1 != xdim1_update_halo_kernel1_ba1_h || ydim1 != ydim1_update_halo_kernel1_ba1_h || xdim2 != xdim2_update_halo_kernel1_ba1_h || ydim2 != ydim2_update_halo_kernel1_ba1_h || xdim3 != xdim3_update_halo_kernel1_ba1_h || ydim3 != ydim3_update_halo_kernel1_ba1_h || xdim4 != xdim4_update_halo_kernel1_ba1_h || ydim4 != ydim4_update_halo_kernel1_ba1_h || xdim5 != xdim5_update_halo_kernel1_ba1_h || ydim5 != ydim5_update_halo_kernel1_ba1_h || xdim6 != xdim6_update_halo_kernel1_ba1_h || ydim6 != ydim6_update_halo_kernel1_ba1_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel1_ba1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_ba1_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel1_ba1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_ba1_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_ba1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_ba1_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel1_ba1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_ba1_h = ydim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_ba1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_ba1_h = xdim2;
hipMemcpyToSymbol( ydim2_update_halo_kernel1_ba1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_ba1_h = ydim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_ba1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_ba1_h = xdim3;
hipMemcpyToSymbol( ydim3_update_halo_kernel1_ba1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_ba1_h = ydim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_ba1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_ba1_h = xdim4;
hipMemcpyToSymbol( ydim4_update_halo_kernel1_ba1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_ba1_h = ydim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_ba1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_ba1_h = xdim5;
hipMemcpyToSymbol( ydim5_update_halo_kernel1_ba1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_ba1_h = ydim5;
hipMemcpyToSymbol( xdim6_update_halo_kernel1_ba1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_ba1_h = xdim6;
hipMemcpyToSymbol( ydim6_update_halo_kernel1_ba1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_ba1_h = ydim6;
}
// Stage the global-constant argument (arg7, the NUM_FIELDS flag array) in
// the OPS constant-argument buffers and copy it to the device.
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers: byte offset of the iteration-range origin within
//each dataset, used to derive the device base pointer passed to the kernel
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[20].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
// Skip the launch entirely when the local range is empty on this rank.
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_ba1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
// Synchronise only in diagnostic mode so kernel time can be measured.
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[20].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[20].mpi_time += t2-t1;
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
// Deferred-execution stub: packages the loop arguments into an
// ops_kernel_descriptor (hashing the kernel index, iteration range and
// dataset indices with a djb2-style hash) and enqueues it for later
// execution by ops_par_loop_update_halo_kernel1_ba1_execute.
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 20;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 20;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
// Deep-copy arg7's host data so the queued kernel is unaffected if the
// caller's buffer changes before execution.
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_ba1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(20,"update_halo_kernel1_ba1");
}
ops_enqueue_kernel(desc);
}
#endif
| 0b94014ced5386390b0bf3e43b2b6ba8d2a066a7.cu | //
// auto-generated by ops.py
//
// Device-side pitches (x and y extents) for each of the seven datasets used
// by update_halo_kernel1_ba1, plus host-side *_h shadows caching the last
// value uploaded to the device (-1 = not yet initialised).
__constant__ int xdim0_update_halo_kernel1_ba1;
int xdim0_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim0_update_halo_kernel1_ba1;
int ydim0_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim1_update_halo_kernel1_ba1;
int xdim1_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim1_update_halo_kernel1_ba1;
int ydim1_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim2_update_halo_kernel1_ba1;
int xdim2_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim2_update_halo_kernel1_ba1;
int ydim2_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim3_update_halo_kernel1_ba1;
int xdim3_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim3_update_halo_kernel1_ba1;
int ydim3_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim4_update_halo_kernel1_ba1;
int xdim4_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim4_update_halo_kernel1_ba1;
int ydim4_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim5_update_halo_kernel1_ba1;
int xdim5_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim5_update_halo_kernel1_ba1;
int ydim5_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim6_update_halo_kernel1_ba1;
int xdim6_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim6_update_halo_kernel1_ba1;
int ydim6_update_halo_kernel1_ba1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_ba1*(y)+xdim0_update_halo_kernel1_ba1*ydim0_update_halo_kernel1_ba1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_ba1*(y)+xdim1_update_halo_kernel1_ba1*ydim1_update_halo_kernel1_ba1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_ba1*(y)+xdim2_update_halo_kernel1_ba1*ydim2_update_halo_kernel1_ba1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_ba1*(y)+xdim3_update_halo_kernel1_ba1*ydim3_update_halo_kernel1_ba1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_ba1*(y)+xdim4_update_halo_kernel1_ba1*ydim4_update_halo_kernel1_ba1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_ba1*(y)+xdim5_update_halo_kernel1_ba1*ydim5_update_halo_kernel1_ba1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_ba1*(y)+xdim6_update_halo_kernel1_ba1*ydim6_update_halo_kernel1_ba1*(z))
//user function
// Halo update: for each field whose flag is set in 'fields', copy the value
// one cell away in +z (offset (0,0,1)) into the current cell (0,0,0).
__device__
inline void update_halo_kernel1_ba1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,0,1)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,0,1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,0,1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,0,1)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,0,1)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,0,1)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,0,1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
// Auto-generated device kernel wrapper: one thread is mapped to each grid
// point of the (size0 x size1 x size2) halo region; each dataset pointer is
// advanced to this thread's element using the per-dataset pitches held in
// the __constant__ xdimN/ydimN symbols.
__global__ void ops_update_halo_kernel1_ba1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
// Global 3D thread index within the launch grid.
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Offset each argument to this thread's cell: row pitch xdimN, slice pitch
// xdimN*ydimN (the 1*1 factors are the generated per-dimension strides).
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim0_update_halo_kernel1_ba1 * ydim0_update_halo_kernel1_ba1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim1_update_halo_kernel1_ba1 * ydim1_update_halo_kernel1_ba1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim2_update_halo_kernel1_ba1 * ydim2_update_halo_kernel1_ba1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim3_update_halo_kernel1_ba1 * ydim3_update_halo_kernel1_ba1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim4_update_halo_kernel1_ba1 * ydim4_update_halo_kernel1_ba1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim5_update_halo_kernel1_ba1 * ydim5_update_halo_kernel1_ba1;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_ba1 + idx_z * 1*1 * xdim6_update_halo_kernel1_ba1 * ydim6_update_halo_kernel1_ba1;
// Guard: the grid is rounded up to whole blocks, so trailing threads can
// fall outside the iteration range.
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_ba1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
// Host stub for the update_halo_kernel1_ba1 parallel loop (kernel index 20):
// computes the locally owned iteration range, refreshes the __constant__
// dataset pitches when they changed, stages the global-constant arg7 on the
// device, derives per-dataset base pointers and launches the kernel.
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
// Lazy-execution entry point: the same loop body, driven by a queued
// ops_kernel_descriptor instead of direct arguments.
void ops_par_loop_update_halo_kernel1_ba1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,20)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(20,"update_halo_kernel1_ba1");
OPS_kernels[20].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Under MPI, clamp the requested global range to the portion owned by this
// rank's sub-block (decomp_disp/decomp_size), extending at physical
// boundaries where there is no neighbouring rank (MPI_PROC_NULL).
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
// Upload dataset pitches to __constant__ memory only when they differ from
// the cached host-side (*_h) copies, avoiding redundant device traffic.
if (xdim0 != xdim0_update_halo_kernel1_ba1_h || ydim0 != ydim0_update_halo_kernel1_ba1_h || xdim1 != xdim1_update_halo_kernel1_ba1_h || ydim1 != ydim1_update_halo_kernel1_ba1_h || xdim2 != xdim2_update_halo_kernel1_ba1_h || ydim2 != ydim2_update_halo_kernel1_ba1_h || xdim3 != xdim3_update_halo_kernel1_ba1_h || ydim3 != ydim3_update_halo_kernel1_ba1_h || xdim4 != xdim4_update_halo_kernel1_ba1_h || ydim4 != ydim4_update_halo_kernel1_ba1_h || xdim5 != xdim5_update_halo_kernel1_ba1_h || ydim5 != ydim5_update_halo_kernel1_ba1_h || xdim6 != xdim6_update_halo_kernel1_ba1_h || ydim6 != ydim6_update_halo_kernel1_ba1_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_ba1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_ba1_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel1_ba1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_ba1_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_ba1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_ba1_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel1_ba1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_ba1_h = ydim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_ba1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_ba1_h = xdim2;
cudaMemcpyToSymbol( ydim2_update_halo_kernel1_ba1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_ba1_h = ydim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_ba1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_ba1_h = xdim3;
cudaMemcpyToSymbol( ydim3_update_halo_kernel1_ba1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_ba1_h = ydim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_ba1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_ba1_h = xdim4;
cudaMemcpyToSymbol( ydim4_update_halo_kernel1_ba1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_ba1_h = ydim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_ba1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_ba1_h = xdim5;
cudaMemcpyToSymbol( ydim5_update_halo_kernel1_ba1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_ba1_h = ydim5;
cudaMemcpyToSymbol( xdim6_update_halo_kernel1_ba1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_ba1_h = xdim6;
cudaMemcpyToSymbol( ydim6_update_halo_kernel1_ba1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_ba1_h = ydim6;
}
// Stage the global-constant argument (arg7, the NUM_FIELDS flag array) in
// the OPS constant-argument buffers and copy it to the device.
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers: byte offset of the iteration-range origin within
//each dataset, used to derive the device base pointer passed to the kernel
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[20].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
// Skip the launch entirely when the local range is empty on this rank.
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_ba1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
// Synchronise only in diagnostic mode so kernel time can be measured.
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[20].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[20].mpi_time += t2-t1;
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
// Deferred-execution stub: packages the loop arguments into an
// ops_kernel_descriptor (hashing the kernel index, iteration range and
// dataset indices with a djb2-style hash) and enqueues it for later
// execution by ops_par_loop_update_halo_kernel1_ba1_execute.
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 20;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 20;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
// Deep-copy arg7's host data so the queued kernel is unaffected if the
// caller's buffer changes before execution.
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_ba1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(20,"update_halo_kernel1_ba1");
}
ops_enqueue_kernel(desc);
}
#endif
|
f950560d2193a31a7d1a75c176d51884b6d73faa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
**/
#include "reader_impl.hpp"
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
#include <cudf/io/types.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <unordered_map>
using std::string;
using std::vector;
using cudf::detail::device_span;
using cudf::detail::host_span;
namespace cudf {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
/**
 * @brief Estimates the maximum expected length of a row, based on the number
 * of columns
 *
 * If the number of columns is not available, it will return a value large
 * enough for most use cases
 *
 * @param[in] num_columns Number of columns in the CSV file (optional)
 *
 * @return Estimated maximum size of a row, in bytes
 **/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept
{
  constexpr size_t kMaxRowBytes    = 16 * 1024;  // flat 16KB bound when column count is unknown
  constexpr size_t kBytesPerColumn = 64;
  constexpr size_t kBasePadding    = 1024;  // 1KB
  // With a known column count, scale the estimate per column on top of a
  // fixed padding; otherwise fall back to the flat bound.
  return (num_columns == 0) ? kMaxRowBytes
                            : kBasePadding + num_columns * kBytesPerColumn;
}
/**
* @brief Translates a dtype string and returns its dtype enumeration and any
* extended dtype flags that are supported by cuIO. Often, this is a column
* with the same underlying dtype the basic types, but with different parsing
* interpretations.
*
* @param[in] dtype String containing the basic or extended dtype
*
* @return Tuple of data_type and flags
*/
std::tuple<data_type, column_parse::flags> get_dtype_info(const std::string &dtype)
{
if (dtype == "hex" || dtype == "hex64") {
return std::make_tuple(data_type{cudf::type_id::INT64}, column_parse::as_hexadecimal);
}
if (dtype == "hex32") {
return std::make_tuple(data_type{cudf::type_id::INT32}, column_parse::as_hexadecimal);
}
return std::make_tuple(convert_string_to_dtype(dtype), column_parse::as_default);
}
/**
 * @brief Strips the leading and trailing quote characters from a string.
 *
 * Erases the first occurrence of @p quotechar and then the last remaining
 * occurrence, if any; strings without quotes are returned unchanged.
 */
std::string removeQuotes(std::string str, char quotechar)
{
  // Drop the opening quote, if one exists anywhere in the string
  auto const open_pos = str.find(quotechar);
  if (open_pos != std::string::npos) { str.erase(open_pos, 1); }
  // Drop the closing quote, searching from the end of what remains
  auto const close_pos = str.rfind(quotechar);
  if (close_pos != std::string::npos) { str.erase(close_pos, 1); }
  return str;
}
/**
 * @brief Parses the first row of the input to derive the column names.
 *
 * The row is either the header row (header_row >= 0) or the first data row,
 * in which case names are generated as <prefix><column index>.
 *
 * @param header Buffer holding the raw bytes of the first row
 * @param opts Parsing options (delimiter, terminator, quote character, ...)
 * @param header_row Index of the header row; negative when there is no header
 * @param prefix Name prefix used when auto-generating column names
 *
 * @return Vector of column names (empty if the buffer holds no usable data)
 */
std::vector<std::string> setColumnNames(std::vector<char> const &header,
ParseOptions const &opts,
int header_row,
std::string prefix)
{
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) { return col_names; }
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts.terminator) ||
(!quotation && first_row[pos] == opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator if present
if (first_row[pos] == opts.delimiter || first_row[pos] == opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && opts.terminator == '\n' && first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == opts.terminator) { break; }
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (opts.multi_delimiter && pos < first_row.size() && first_row[pos] == opts.delimiter &&
first_row[pos + 1] == opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
/**
 * @brief Reads the CSV input and converts it into a cudf table.
 *
 * Applies the byte-range and row-skipping options, transfers (and, if
 * required, decompresses) the source data, gathers row offsets, resolves
 * column names and types, and decodes the selected columns.
 *
 * @param stream Stream on which device operations are enqueued
 * @return Parsed columns together with table metadata (column names)
 */
table_with_metadata reader::impl::read(hipStream_t stream)
{
  auto range_offset = opts_.get_byte_range_offset();
  auto range_size = opts_.get_byte_range_size();
  auto skip_rows = opts_.get_skiprows();
  auto skip_end_rows = opts_.get_skipfooter();
  auto num_rows = opts_.get_nrows();

  if (range_offset > 0 || range_size > 0) {
    CUDF_EXPECTS(compression_type_ == "none",
                 "Reading compressed data using `byte range` is unsupported");
  }

  // Pad the mapped range so a row straddling the range end can still be
  // parsed in full
  size_t map_range_size = 0;
  if (range_size != 0) {
    const auto num_columns = ::max(opts_.get_names().size(), opts_.get_dtypes().size());
    map_range_size = range_size + calculateMaxRowSize(num_columns);
  }

  // Support delayed opening of the file if using memory mapping datasource
  // This allows only mapping of a subset of the file if using byte range
  if (source_ == nullptr) {
    assert(!filepath_.empty());
    source_ = datasource::create(filepath_, range_offset, map_range_size);
  }

  // Return an empty dataframe if no data and no column metadata to process
  if (source_->is_empty() && (opts_.get_names().empty() || opts_.get_dtypes().empty())) {
    return {std::make_unique<table>(), {}};
  }

  // Transfer source data to GPU
  if (!source_->is_empty()) {
    auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
    auto buffer = source_->host_read(range_offset, data_size);

    auto h_data = host_span<char const>(  //
      reinterpret_cast<const char *>(buffer->data()),
      buffer->size());

    std::vector<char> h_uncomp_data_owner;

    // Decompress into a host-owned buffer when the input is compressed
    if (compression_type_ != "none") {
      h_uncomp_data_owner = get_uncompressed_data(h_data, compression_type_);
      h_data = h_uncomp_data_owner;
    }

    // None of the parameters for row selection is used, we are parsing the entire file
    const bool load_whole_file = range_offset == 0 && range_size == 0 && skip_rows <= 0 &&
                                 skip_end_rows <= 0 && num_rows == -1;

    // With byte range, find the start of the first data row
    size_t const data_start_offset = (range_offset != 0) ? find_first_row_start(h_data) : 0;

    // TODO: Allow parsing the header outside the mapped range
    CUDF_EXPECTS((range_offset == 0 || opts_.get_header() < 0),
                 "byte_range offset with header not supported");

    // Gather row offsets
    gather_row_offsets(h_data,
                       data_start_offset,
                       (range_size) ? range_size : h_data.size(),
                       (skip_rows > 0) ? skip_rows : 0,
                       num_rows,
                       load_whole_file,
                       stream);

    // Exclude the rows that are to be skipped from the end
    if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets_.size()) {
      row_offsets_.resize(row_offsets_.size() - skip_end_rows);
    }

    // Exclude the end-of-data row from number of rows with actual data
    num_records_ = row_offsets_.size();
    num_records_ -= (num_records_ > 0);
  } else {
    num_records_ = 0;
  }

  // Check if the user gave us a list of column names
  if (not opts_.get_names().empty()) {
    h_column_flags_.resize(opts_.get_names().size(), column_parse::enabled);
    col_names_ = opts_.get_names();
  } else {
    // Otherwise derive names from the header row (or auto-generate them)
    col_names_ = setColumnNames(header_, opts, opts_.get_header(), opts_.get_prefix());

    num_actual_cols_ = num_active_cols_ = col_names_.size();

    h_column_flags_.resize(num_actual_cols_, column_parse::enabled);

    // Rename empty column names to "Unnamed: col_index"
    for (size_t col_idx = 0; col_idx < col_names_.size(); ++col_idx) {
      if (col_names_[col_idx].empty()) {
        col_names_[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
      }
    }

    // Looking for duplicates
    std::unordered_map<string, int> col_names_histogram;
    for (auto &col_name : col_names_) {
      // Operator [] inserts a default-initialized value if the given key is not
      // present
      if (++col_names_histogram[col_name] > 1) {
        if (opts_.is_enabled_mangle_dupe_cols()) {
          // Rename duplicates of column X as X.1, X.2, ...; First appearance
          // stays as X
          col_name += "." + std::to_string(col_names_histogram[col_name] - 1);
        } else {
          // All duplicate columns will be ignored; First appearance is parsed
          const auto idx = &col_name - col_names_.data();
          h_column_flags_[idx] = column_parse::disabled;
        }
      }
    }

    // Update the number of columns to be processed, if some might have been
    // removed
    if (!opts_.is_enabled_mangle_dupe_cols()) { num_active_cols_ = col_names_histogram.size(); }
  }

  // User can specify which columns should be parsed
  if (!opts_.get_use_cols_indexes().empty() || !opts_.get_use_cols_names().empty()) {
    // Start with every column disabled, then re-enable the requested subset
    std::fill(h_column_flags_.begin(), h_column_flags_.end(), column_parse::disabled);

    for (const auto index : opts_.get_use_cols_indexes()) {
      h_column_flags_[index] = column_parse::enabled;
    }
    num_active_cols_ = opts_.get_use_cols_indexes().size();

    for (const auto &name : opts_.get_use_cols_names()) {
      const auto it = std::find(col_names_.begin(), col_names_.end(), name);
      if (it != col_names_.end()) {
        h_column_flags_[it - col_names_.begin()] = column_parse::enabled;
        num_active_cols_++;
      }
    }
  }

  // User can specify which columns should be inferred as datetime
  if (!opts_.get_infer_date_indexes().empty() || !opts_.get_infer_date_names().empty()) {
    for (const auto index : opts_.get_infer_date_indexes()) {
      h_column_flags_[index] |= column_parse::as_datetime;
    }

    for (const auto &name : opts_.get_infer_date_names()) {
      auto it = std::find(col_names_.begin(), col_names_.end(), name);
      if (it != col_names_.end()) {
        h_column_flags_[it - col_names_.begin()] |= column_parse::as_datetime;
      }
    }
  }

  // Return empty table rather than exception if nothing to load
  if (num_active_cols_ == 0) { return {std::make_unique<table>(), {}}; }

  auto metadata = table_metadata{};
  auto out_columns = std::vector<std::unique_ptr<cudf::column>>();

  auto column_types = gather_column_types(stream);
  out_columns.reserve(column_types.size());

  if (num_records_ != 0) {
    auto out_buffers = decode_data(column_types, stream);
    for (size_t i = 0; i < column_types.size(); ++i) {
      metadata.column_names.emplace_back(out_buffers[i].name);
      if (column_types[i].id() == type_id::STRING && opts.quotechar != '\0' &&
          opts.doublequote == true) {
        // PANDAS' default behavior of enabling doublequote for two consecutive
        // quotechars in quoted fields results in reduction to a single quotechar
        // TODO: Would be much more efficient to perform this operation in-place
        // during the conversion stage
        const std::string quotechar(1, opts.quotechar);
        const std::string dblquotechar(2, opts.quotechar);
        std::unique_ptr<column> col = make_strings_column(out_buffers[i]._strings, stream);
        out_columns.emplace_back(
          cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr_));
      } else {
        out_columns.emplace_back(make_column(out_buffers[i], stream, mr_));
      }
    }
  } else {
    // Create empty columns
    for (size_t i = 0; i < column_types.size(); ++i) {
      out_columns.emplace_back(make_empty_column(column_types[i]));
    }
    // Handle empty metadata
    for (int col = 0; col < num_actual_cols_; ++col) {
      if (h_column_flags_[col] & column_parse::enabled) {
        metadata.column_names.emplace_back(col_names_[col]);
      }
    }
  }
  return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)};
}
/**
 * @brief Finds the byte offset just past the first line terminator.
 *
 * @param data Host buffer holding the input characters
 * @return Offset of the character following the first terminator, clamped to
 * the buffer size when no terminator is found
 */
size_t reader::impl::find_first_row_start(host_span<char const> const data)
{
  // For now, look for the first terminator (assume the first terminator isn't within a quote)
  // TODO: Attempt to infer this from the data
  size_t pos = 0;
  for (; pos < data.size(); ++pos) {
    if (data[pos] == opts.terminator) { break; }
  }
  return ::min(pos + 1, data.size());
}
/**
 * @brief Finds the offsets of all rows within the requested byte/row range.
 *
 * Processes the input in chunks of at most 64MB. For each chunk, a first GPU
 * pass counts candidate rows per character block for every possible parser
 * state, the host then selects the counts matching the actual input context,
 * and a second GPU pass writes the row offsets. Afterwards, blank rows are
 * removed, the header row is copied into `header_`, and header rows are
 * dropped from the offsets.
 *
 * @param data Host buffer with the (uncompressed) input
 * @param range_begin Byte offset where parsing starts
 * @param range_end Byte offset where parsing stops
 * @param skip_rows Number of rows to skip from the start
 * @param num_rows Maximum number of rows to read; -1 means no limit
 * @param load_whole_file Whether the entire file is parsed (reserves a
 * full-size device buffer up front)
 * @param stream Stream on which device operations are enqueued
 */
void reader::impl::gather_row_offsets(host_span<char const> const data,
                                      size_t range_begin,
                                      size_t range_end,
                                      size_t skip_rows,
                                      int64_t num_rows,
                                      bool load_whole_file,
                                      hipStream_t stream)
{
  constexpr size_t max_chunk_bytes = 64 * 1024 * 1024;  // 64MB
  size_t buffer_size = ::min(max_chunk_bytes, data.size());
  size_t max_blocks =
    std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
  hostdevice_vector<uint64_t> row_ctx(max_blocks);
  // Keep one character of history before the range start
  size_t buffer_pos = ::min(range_begin - ::min(range_begin, sizeof(char)), data.size());
  size_t pos = ::min(range_begin, data.size());
  size_t header_rows = (opts_.get_header() >= 0) ? opts_.get_header() + 1 : 0;
  uint64_t ctx = 0;

  // For compatibility with the previous parser, a row is considered in-range if the
  // previous row terminator is within the given range
  range_end += (range_end < data.size());
  data_.resize(0);
  row_offsets_.resize(0);
  data_.reserve((load_whole_file) ? data.size() : ::min(buffer_size * 2, data.size()));
  do {
    size_t target_pos = ::min(pos + max_chunk_bytes, data.size());
    size_t chunk_size = target_pos - pos;

    // Append the next chunk of input to the device buffer
    data_.insert(data_.end(), data.begin() + buffer_pos + data_.size(), data.begin() + target_pos);

    // Pass 1: Count the potential number of rows in each character block for each
    // possible parser state at the beginning of the block.
    uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(opts,
                                                                 row_ctx.device_ptr(),
                                                                 device_span<uint64_t>(),
                                                                 data_,
                                                                 chunk_size,
                                                                 pos,
                                                                 buffer_pos,
                                                                 data.size(),
                                                                 range_begin,
                                                                 range_end,
                                                                 skip_rows,
                                                                 stream);
    CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
                            row_ctx.device_ptr(),
                            num_blocks * sizeof(uint64_t),
                            hipMemcpyDeviceToHost,
                            stream));
    CUDA_TRY(hipStreamSynchronize(stream));

    // Sum up the rows in each character block, selecting the row count that
    // corresponds to the current input context. Also stores the now known input
    // context per character block that will be needed by the second pass.
    for (uint32_t i = 0; i < num_blocks; i++) {
      uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
      row_ctx[i] = ctx;
      ctx = ctx_next;
    }
    // The row count occupies the upper bits of the context word
    size_t total_rows = ctx >> 2;
    if (total_rows > skip_rows) {
      // At least one row in range in this batch
      row_offsets_.resize(total_rows - skip_rows);
      CUDA_TRY(hipMemcpyAsync(row_ctx.device_ptr(),
                              row_ctx.host_ptr(),
                              num_blocks * sizeof(uint64_t),
                              hipMemcpyHostToDevice,
                              stream));
      // Pass 2: Output row offsets
      cudf::io::csv::gpu::gather_row_offsets(opts,
                                             row_ctx.device_ptr(),
                                             row_offsets_,
                                             data_,
                                             chunk_size,
                                             pos,
                                             buffer_pos,
                                             data.size(),
                                             range_begin,
                                             range_end,
                                             skip_rows,
                                             stream);
      // With byte range, we want to keep only one row out of the specified range
      if (range_end < data.size()) {
        CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
                                row_ctx.device_ptr(),
                                num_blocks * sizeof(uint64_t),
                                hipMemcpyDeviceToHost,
                                stream));
        CUDA_TRY(hipStreamSynchronize(stream));

        size_t rows_out_of_range = 0;
        for (uint32_t i = 0; i < num_blocks; i++) { rows_out_of_range += row_ctx[i]; }
        if (rows_out_of_range != 0) {
          // Keep one row out of range (used to infer length of previous row)
          auto new_row_offsets_size =
            row_offsets_.size() - ::min(rows_out_of_range - 1, row_offsets_.size());
          row_offsets_.resize(new_row_offsets_size);
          // Implies we reached the end of the range
          break;
        }
      }
      // num_rows does not include blank rows
      if (num_rows >= 0) {
        if (row_offsets_.size() > header_rows + static_cast<size_t>(num_rows)) {
          size_t num_blanks =
            cudf::io::csv::gpu::count_blank_rows(opts, data_, row_offsets_, stream);
          if (row_offsets_.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
            // Got the desired number of rows
            break;
          }
        }
      }
    } else {
      // Discard data (all rows below skip_rows), keeping one character for history
      size_t discard_bytes = ::max(data_.size(), sizeof(char)) - sizeof(char);
      if (discard_bytes != 0) {
        data_.erase(data_.begin(), data_.begin() + discard_bytes);
        buffer_pos += discard_bytes;
      }
    }
    pos = target_pos;
  } while (pos < data.size());

  // Eliminate blank rows
  if (row_offsets_.size() != 0) {
    cudf::io::csv::gpu::remove_blank_rows(opts, data_, row_offsets_, stream);
  }
  // Remove header rows and extract header
  const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
  if (header_row_index + 1 < row_offsets_.size()) {
    // Copy the header row's start/end offsets back to the host
    CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
                            row_offsets_.data().get() + header_row_index,
                            2 * sizeof(uint64_t),
                            hipMemcpyDeviceToHost,
                            stream));
    CUDA_TRY(hipStreamSynchronize(stream));

    const auto header_start = buffer_pos + row_ctx[0];
    const auto header_end = buffer_pos + row_ctx[1];
    CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
                 "Invalid csv header location");
    header_.assign(data.begin() + header_start, data.begin() + header_end);
    if (header_rows > 0) {
      row_offsets_.erase(row_offsets_.begin(), row_offsets_.begin() + header_rows);
    }
  }
  // Apply num_rows limit; the +1 keeps the end-of-data row (excluded from
  // num_records_ later in read())
  if (num_rows >= 0) { row_offsets_.resize(std::min<size_t>(row_offsets_.size(), num_rows + 1)); }
}
/**
 * @brief Determines the data type for each active column.
 *
 * When the user did not specify dtypes, types are inferred on the GPU from
 * per-column statistics. Otherwise the user-specified dtypes are used; they
 * can be a single type for all columns, one type per column in order, or a
 * set of "name:dtype" mappings.
 *
 * @param stream Stream on which device operations are enqueued
 * @return Data type for each active column
 */
std::vector<data_type> reader::impl::gather_column_types(hipStream_t stream)
{
  std::vector<data_type> dtypes;

  if (opts_.get_dtypes().empty()) {
    if (num_records_ == 0) {
      // No data to infer from; EMPTY is replaced with STRING at the end
      dtypes.resize(num_active_cols_, data_type{type_id::EMPTY});
    } else {
      d_column_flags_ = h_column_flags_;

      auto column_stats = cudf::io::csv::gpu::detect_column_types(
        opts, data_, d_column_flags_, row_offsets_, num_active_cols_, stream);

      CUDA_TRY(hipStreamSynchronize(stream));

      for (int col = 0; col < num_active_cols_; col++) {
        unsigned long long countInt = column_stats[col].countInt8 + column_stats[col].countInt16 +
                                      column_stats[col].countInt32 + column_stats[col].countInt64;

        if (column_stats[col].countNULL == num_records_) {
          // Entire column is NULL; allocate the smallest amount of memory
          dtypes.emplace_back(cudf::type_id::INT8);
        } else if (column_stats[col].countString > 0L) {
          dtypes.emplace_back(cudf::type_id::STRING);
        } else if (column_stats[col].countDateAndTime > 0L) {
          dtypes.emplace_back(cudf::type_id::TIMESTAMP_NANOSECONDS);
        } else if (column_stats[col].countBool > 0L) {
          dtypes.emplace_back(cudf::type_id::BOOL8);
        } else if (column_stats[col].countFloat > 0L ||
                   (column_stats[col].countFloat == 0L && countInt > 0L &&
                    column_stats[col].countNULL > 0L)) {
          // The second condition has been added to conform to
          // PANDAS which states that a column of integers with
          // a single NULL record need to be treated as floats.
          dtypes.emplace_back(cudf::type_id::FLOAT64);
        } else {
          // All other integers are stored as 64-bit to conform to PANDAS
          dtypes.emplace_back(cudf::type_id::INT64);
        }
      }
    }
  } else {
    // Check whether every entry has the "name:dtype" form
    const bool is_dict =
      std::all_of(opts_.get_dtypes().begin(), opts_.get_dtypes().end(), [](const auto &s) {
        return s.find(':') != std::string::npos;
      });

    if (!is_dict) {
      if (opts_.get_dtypes().size() == 1) {
        // If it's a single dtype, assign that dtype to all active columns
        data_type dtype_;
        column_parse::flags col_flags_;
        std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[0]);
        dtypes.resize(num_active_cols_, dtype_);
        for (int col = 0; col < num_actual_cols_; col++) { h_column_flags_[col] |= col_flags_; }
        CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
      } else {
        // If it's a list, assign dtypes to active columns in the given order
        CUDF_EXPECTS(static_cast<int>(opts_.get_dtypes().size()) >= num_actual_cols_,
                     "Must specify data types for all columns");
        auto dtype_ = std::back_inserter(dtypes);
        for (int col = 0; col < num_actual_cols_; col++) {
          if (h_column_flags_[col] & column_parse::enabled) {
            column_parse::flags col_flags_;
            std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[col]);
            h_column_flags_[col] |= col_flags_;
            CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
          }
        }
      }
    } else {
      // Translate vector of `name : dtype` strings to map
      // NOTE: Incoming pairs can be out-of-order from column names in dataset
      std::unordered_map<std::string, std::string> col_type_map;
      for (const auto &pair : opts_.get_dtypes()) {
        const auto pos = pair.find_last_of(':');
        const auto name = pair.substr(0, pos);
        const auto dtype = pair.substr(pos + 1, pair.size());
        col_type_map[name] = dtype;
      }

      auto dtype_ = std::back_inserter(dtypes);
      for (int col = 0; col < num_actual_cols_; col++) {
        if (h_column_flags_[col] & column_parse::enabled) {
          CUDF_EXPECTS(col_type_map.find(col_names_[col]) != col_type_map.end(),
                       "Must specify data types for all active columns");
          column_parse::flags col_flags_;
          std::tie(dtype_, col_flags_) = get_dtype_info(col_type_map[col_names_[col]]);
          h_column_flags_[col] |= col_flags_;
          CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
        }
      }
    }
  }

  // Override timestamp types with the user-requested timestamp type, if any
  if (opts_.get_timestamp_type().id() != cudf::type_id::EMPTY) {
    for (auto &type : dtypes) {
      if (cudf::is_timestamp(type)) { type = opts_.get_timestamp_type(); }
    }
  }

  for (size_t i = 0; i < dtypes.size(); i++) {
    // Replace EMPTY dtype with STRING
    if (dtypes[i].id() == type_id::EMPTY) { dtypes[i] = data_type{type_id::STRING}; }
  }

  return dtypes;
}
/**
 * @brief Converts the row-delimited device data into typed column buffers.
 *
 * @param column_types Data type of each active column
 * @param stream Stream on which device operations are enqueued
 * @return Output buffer (data + null mask + name) for each active column
 */
std::vector<column_buffer> reader::impl::decode_data(std::vector<data_type> const &column_types,
                                                     hipStream_t stream)
{
  // Alloc output; columns' data memory is still expected for empty dataframe
  std::vector<column_buffer> out_buffers;
  out_buffers.reserve(column_types.size());

  for (int col = 0, active_col = 0; col < num_actual_cols_; ++col) {
    if (h_column_flags_[col] & column_parse::enabled) {
      // STRING buffers are intermediate (read() rebuilds the final string
      // column), so they use the default resource instead of the caller's
      const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
      auto out_buffer =
        column_buffer(column_types[active_col],
                      num_records_,
                      true,
                      stream,
                      is_final_allocation ? mr_ : rmm::mr::get_current_device_resource());

      out_buffer.name = col_names_[col];
      out_buffers.emplace_back(std::move(out_buffer));
      active_col++;
    }
  }

  // Gather per-column output data/null-mask pointers for the decode kernel
  thrust::host_vector<void *> h_data(num_active_cols_);
  thrust::host_vector<bitmask_type *> h_valid(num_active_cols_);
  for (int i = 0; i < num_active_cols_; ++i) {
    h_data[i] = out_buffers[i].data();
    h_valid[i] = out_buffers[i].null_mask();
  }

  // Upload types, pointers, and column flags before launching the decode
  rmm::device_vector<data_type> d_dtypes(column_types);
  rmm::device_vector<void *> d_data = h_data;
  rmm::device_vector<bitmask_type *> d_valid = h_valid;
  d_column_flags_ = h_column_flags_;

  cudf::io::csv::gpu::decode_row_column_data(
    opts, data_, d_column_flags_, row_offsets_, d_dtypes, d_data, d_valid, stream);
  CUDA_TRY(hipStreamSynchronize(stream));

  // Mark null counts as not-yet-computed
  for (int i = 0; i < num_active_cols_; ++i) { out_buffers[i].null_count() = UNKNOWN_NULL_COUNT; }

  return out_buffers;
}
/**
 * @brief Constructs the reader implementation and translates the
 * user-facing reader options into the internal parsing options.
 *
 * @param source Input data source; may be null to delay opening until read()
 * @param filepath Path of the input file (used when `source` is null)
 * @param options User-facing reader settings
 * @param mr Device memory resource used for output allocations
 */
reader::impl::impl(std::unique_ptr<datasource> source,
                   std::string filepath,
                   csv_reader_options const &options,
                   rmm::mr::device_memory_resource *mr)
  : mr_(mr), source_(std::move(source)), filepath_(filepath), opts_(options)
{
  num_actual_cols_ = opts_.get_names().size();
  num_active_cols_ = num_actual_cols_;

  // Whitespace-delimited mode collapses runs of spaces into one separator
  const bool whitespace_delim = opts_.is_enabled_delim_whitespace();
  opts.delimiter = whitespace_delim ? ' ' : opts_.get_delimiter();
  opts.multi_delimiter = whitespace_delim;

  opts.terminator = opts_.get_lineterminator();

  // Quoting is honored only when a quote char is set and quoting is enabled
  const bool quoting_enabled =
    opts_.get_quotechar() != '\0' && opts_.get_quoting() != quote_style::NONE;
  if (quoting_enabled) {
    opts.quotechar = opts_.get_quotechar();
    opts.keepquotes = false;
    opts.doublequote = opts_.is_enabled_doublequote();
  } else {
    opts.quotechar = '\0';
    opts.keepquotes = true;
    opts.doublequote = false;
  }

  opts.skipblanklines = opts_.is_enabled_skip_blank_lines();
  opts.comment = opts_.get_comment();
  opts.dayfirst = opts_.is_enabled_dayfirst();
  opts.decimal = opts_.get_decimal();
  opts.thousands = opts_.get_thousands();
  CUDF_EXPECTS(opts.decimal != opts.delimiter, "Decimal point cannot be the same as the delimiter");
  CUDF_EXPECTS(opts.thousands != opts.delimiter,
               "Thousands separator cannot be the same as the delimiter");

  compression_type_ =
    infer_compression_type(opts_.get_compression(),
                           filepath,
                           {{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});

  // Handle user-defined true values, whereby field data is substituted with a
  // boolean true or numeric `1` value
  if (opts_.get_true_values().size() != 0) {
    d_trie_true_ = createSerializedTrie(opts_.get_true_values());
    opts.trueValuesTrie = d_trie_true_.data().get();
  }

  // Handle user-defined false values, whereby field data is substituted with a
  // boolean false or numeric `0` value
  if (opts_.get_false_values().size() != 0) {
    d_trie_false_ = createSerializedTrie(opts_.get_false_values());
    opts.falseValuesTrie = d_trie_false_.data().get();
  }

  // Handle user-defined N/A values, whereby field data is treated as null
  if (opts_.get_na_values().size() != 0) {
    d_trie_na_ = createSerializedTrie(opts_.get_na_values());
    opts.naValuesTrie = d_trie_na_.data().get();
  }
}
// Forward to implementation
/**
 * @brief Constructs a reader from file paths; exactly one path is supported.
 *
 * @param filepaths Paths to the input files
 * @param options Reader settings
 * @param mr Device memory resource used for output allocations
 */
reader::reader(std::vector<std::string> const &filepaths,
               csv_reader_options const &options,
               rmm::mr::device_memory_resource *mr)
{
  CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
  // Delay actual instantiation of data source until read to allow for
  // partial memory mapping of file using byte ranges
  _impl = std::make_unique<impl>(nullptr, filepaths[0], options, mr);
}
// Forward to implementation
/**
 * @brief Constructs a reader from existing datasources; exactly one source is
 * supported.
 *
 * @param sources Input data sources (ownership is taken)
 * @param options Reader settings
 * @param mr Device memory resource used for output allocations
 */
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
               csv_reader_options const &options,
               rmm::mr::device_memory_resource *mr)
{
  CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
  _impl = std::make_unique<impl>(std::move(sources[0]), "", options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;

// Forward to implementation
/**
 * @brief Reads the dataset and returns the resulting table with metadata.
 */
table_with_metadata reader::read(hipStream_t stream) { return _impl->read(stream); }
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
| f950560d2193a31a7d1a75c176d51884b6d73faa.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
**/
#include "reader_impl.hpp"
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
#include <cudf/io/types.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <unordered_map>
using std::string;
using std::vector;
using cudf::detail::device_span;
using cudf::detail::host_span;
namespace cudf {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
/**
* @brief Estimates the maximum expected length or a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
**/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept
{
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
/**
* @brief Translates a dtype string and returns its dtype enumeration and any
* extended dtype flags that are supported by cuIO. Often, this is a column
* with the same underlying dtype the basic types, but with different parsing
* interpretations.
*
* @param[in] dtype String containing the basic or extended dtype
*
* @return Tuple of data_type and flags
*/
std::tuple<data_type, column_parse::flags> get_dtype_info(const std::string &dtype)
{
if (dtype == "hex" || dtype == "hex64") {
return std::make_tuple(data_type{cudf::type_id::INT64}, column_parse::as_hexadecimal);
}
if (dtype == "hex32") {
return std::make_tuple(data_type{cudf::type_id::INT32}, column_parse::as_hexadecimal);
}
return std::make_tuple(convert_string_to_dtype(dtype), column_parse::as_default);
}
/**
* @brief Removes the first and Last quote in the string
*/
string removeQuotes(string str, char quotechar)
{
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) { str.erase(first_quote, 1); }
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) { str.erase(last_quote, 1); }
return str;
}
/**
* @brief Parse the first row to set the column names in the raw_csv parameter.
* The first row can be either the header row, or the first data row
*/
std::vector<std::string> setColumnNames(std::vector<char> const &header,
ParseOptions const &opts,
int header_row,
std::string prefix)
{
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) { return col_names; }
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts.terminator) ||
(!quotation && first_row[pos] == opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator is present
if (first_row[pos] == opts.delimiter || first_row[pos] == opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && opts.terminator == '\n' && first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == opts.terminator) { break; }
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (opts.multi_delimiter && pos < first_row.size() && first_row[pos] == opts.delimiter &&
first_row[pos + 1] == opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
table_with_metadata reader::impl::read(cudaStream_t stream)
{
auto range_offset = opts_.get_byte_range_offset();
auto range_size = opts_.get_byte_range_size();
auto skip_rows = opts_.get_skiprows();
auto skip_end_rows = opts_.get_skipfooter();
auto num_rows = opts_.get_nrows();
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(compression_type_ == "none",
"Reading compressed data using `byte range` is unsupported");
}
size_t map_range_size = 0;
if (range_size != 0) {
const auto num_columns = std::max(opts_.get_names().size(), opts_.get_dtypes().size());
map_range_size = range_size + calculateMaxRowSize(num_columns);
}
// Support delayed opening of the file if using memory mapping datasource
// This allows only mapping of a subset of the file if using byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
// Return an empty dataframe if no data and no column metadata to process
if (source_->is_empty() && (opts_.get_names().empty() || opts_.get_dtypes().empty())) {
return {std::make_unique<table>(), {}};
}
// Transfer source data to GPU
if (!source_->is_empty()) {
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
auto buffer = source_->host_read(range_offset, data_size);
auto h_data = host_span<char const>( //
reinterpret_cast<const char *>(buffer->data()),
buffer->size());
std::vector<char> h_uncomp_data_owner;
if (compression_type_ != "none") {
h_uncomp_data_owner = get_uncompressed_data(h_data, compression_type_);
h_data = h_uncomp_data_owner;
}
// None of the parameters for row selection is used, we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 && skip_rows <= 0 &&
skip_end_rows <= 0 && num_rows == -1;
// With byte range, find the start of the first data row
size_t const data_start_offset = (range_offset != 0) ? find_first_row_start(h_data) : 0;
// TODO: Allow parsing the header outside the mapped range
CUDF_EXPECTS((range_offset == 0 || opts_.get_header() < 0),
"byte_range offset with header not supported");
// Gather row offsets
gather_row_offsets(h_data,
data_start_offset,
(range_size) ? range_size : h_data.size(),
(skip_rows > 0) ? skip_rows : 0,
num_rows,
load_whole_file,
stream);
// Exclude the rows that are to be skipped from the end
if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets_.size()) {
row_offsets_.resize(row_offsets_.size() - skip_end_rows);
}
// Exclude the end-of-data row from number of rows with actual data
num_records_ = row_offsets_.size();
num_records_ -= (num_records_ > 0);
} else {
num_records_ = 0;
}
// Check if the user gave us a list of column names
if (not opts_.get_names().empty()) {
h_column_flags_.resize(opts_.get_names().size(), column_parse::enabled);
col_names_ = opts_.get_names();
} else {
col_names_ = setColumnNames(header_, opts, opts_.get_header(), opts_.get_prefix());
num_actual_cols_ = num_active_cols_ = col_names_.size();
h_column_flags_.resize(num_actual_cols_, column_parse::enabled);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < col_names_.size(); ++col_idx) {
if (col_names_[col_idx].empty()) {
col_names_[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
// Looking for duplicates
std::unordered_map<string, int> col_names_histogram;
for (auto &col_name : col_names_) {
// Operator [] inserts a default-initialized value if the given key is not
// present
if (++col_names_histogram[col_name] > 1) {
if (opts_.is_enabled_mangle_dupe_cols()) {
// Rename duplicates of column X as X.1, X.2, ...; First appearance
// stays as X
col_name += "." + std::to_string(col_names_histogram[col_name] - 1);
} else {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - col_names_.data();
h_column_flags_[idx] = column_parse::disabled;
}
}
}
// Update the number of columns to be processed, if some might have been
// removed
if (!opts_.is_enabled_mangle_dupe_cols()) { num_active_cols_ = col_names_histogram.size(); }
}
// User can specify which columns should be parsed
if (!opts_.get_use_cols_indexes().empty() || !opts_.get_use_cols_names().empty()) {
std::fill(h_column_flags_.begin(), h_column_flags_.end(), column_parse::disabled);
for (const auto index : opts_.get_use_cols_indexes()) {
h_column_flags_[index] = column_parse::enabled;
}
num_active_cols_ = opts_.get_use_cols_indexes().size();
for (const auto &name : opts_.get_use_cols_names()) {
const auto it = std::find(col_names_.begin(), col_names_.end(), name);
if (it != col_names_.end()) {
h_column_flags_[it - col_names_.begin()] = column_parse::enabled;
num_active_cols_++;
}
}
}
// User can specify which columns should be inferred as datetime
if (!opts_.get_infer_date_indexes().empty() || !opts_.get_infer_date_names().empty()) {
for (const auto index : opts_.get_infer_date_indexes()) {
h_column_flags_[index] |= column_parse::as_datetime;
}
for (const auto &name : opts_.get_infer_date_names()) {
auto it = std::find(col_names_.begin(), col_names_.end(), name);
if (it != col_names_.end()) {
h_column_flags_[it - col_names_.begin()] |= column_parse::as_datetime;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_cols_ == 0) { return {std::make_unique<table>(), {}}; }
auto metadata = table_metadata{};
auto out_columns = std::vector<std::unique_ptr<cudf::column>>();
auto column_types = gather_column_types(stream);
out_columns.reserve(column_types.size());
if (num_records_ != 0) {
auto out_buffers = decode_data(column_types, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
metadata.column_names.emplace_back(out_buffers[i].name);
if (column_types[i].id() == type_id::STRING && opts.quotechar != '\0' &&
opts.doublequote == true) {
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
// TODO: Would be much more efficient to perform this operation in-place
// during the conversion stage
const std::string quotechar(1, opts.quotechar);
const std::string dblquotechar(2, opts.quotechar);
std::unique_ptr<column> col = make_strings_column(out_buffers[i]._strings, stream);
out_columns.emplace_back(
cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr_));
} else {
out_columns.emplace_back(make_column(out_buffers[i], stream, mr_));
}
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Handle empty metadata
for (int col = 0; col < num_actual_cols_; ++col) {
if (h_column_flags_[col] & column_parse::enabled) {
metadata.column_names.emplace_back(col_names_[col]);
}
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)};
}
size_t reader::impl::find_first_row_start(host_span<char const> const data)
{
// Scan for the first line terminator; for now we assume it does not fall
// inside a quoted field.
// TODO: Attempt to infer this from the data
size_t offset = 0;
for (; offset < data.size(); ++offset) {
if (data[offset] == opts.terminator) { break; }
}
// Step past the terminator itself; clamp to the buffer size to handle the
// case where no terminator was found.
return std::min(offset + 1, data.size());
}
/**
 * @brief Find the byte offset of every row within the requested byte range.
 *
 * Processes the input in chunks of at most 64MB. For each chunk, a first GPU
 * pass counts candidate rows per character block for every possible parser
 * start state; the host then resolves the actual state chain, and a second GPU
 * pass writes the row offsets. Also extracts the header row into header_ and
 * applies the skip_rows / num_rows limits. Results land in data_ and
 * row_offsets_.
 *
 * @param data Host buffer holding the csv character data
 * @param range_begin Byte offset where parsing may begin
 * @param range_end Byte offset past which rows are considered out of range
 * @param skip_rows Number of rows to skip from the start
 * @param num_rows Maximum number of rows to read; negative means "all"
 * @param load_whole_file If true, keep the entire input resident in data_
 * @param stream CUDA stream for device work
 */
void reader::impl::gather_row_offsets(host_span<char const> const data,
size_t range_begin,
size_t range_end,
size_t skip_rows,
int64_t num_rows,
bool load_whole_file,
cudaStream_t stream)
{
constexpr size_t max_chunk_bytes = 64 * 1024 * 1024; // 64MB
size_t buffer_size = std::min(max_chunk_bytes, data.size());
size_t max_blocks =
std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
hostdevice_vector<uint64_t> row_ctx(max_blocks);
// Start buffering one byte before the range (when possible) so the previous
// character is available as parser history
size_t buffer_pos = std::min(range_begin - std::min(range_begin, sizeof(char)), data.size());
size_t pos = std::min(range_begin, data.size());
size_t header_rows = (opts_.get_header() >= 0) ? opts_.get_header() + 1 : 0;
uint64_t ctx = 0;
// For compatibility with the previous parser, a row is considered in-range if the
// previous row terminator is within the given range
range_end += (range_end < data.size());
data_.resize(0);
row_offsets_.resize(0);
data_.reserve((load_whole_file) ? data.size() : std::min(buffer_size * 2, data.size()));
do {
size_t target_pos = std::min(pos + max_chunk_bytes, data.size());
size_t chunk_size = target_pos - pos;
// Append only the not-yet-buffered portion of this chunk to data_
data_.insert(data_.end(), data.begin() + buffer_pos + data_.size(), data.begin() + target_pos);
// Pass 1: Count the potential number of rows in each character block for each
// possible parser state at the beginning of the block.
uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(opts,
row_ctx.device_ptr(),
device_span<uint64_t>(),
data_,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// Sum up the rows in each character block, selecting the row count that
// corresponds to the current input context. Also stores the now known input
// context per character block that will be needed by the second pass.
for (uint32_t i = 0; i < num_blocks; i++) {
uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
row_ctx[i] = ctx;
ctx = ctx_next;
}
// Low 2 bits of ctx hold parser state; upper bits the accumulated row count
size_t total_rows = ctx >> 2;
if (total_rows > skip_rows) {
// At least one row in range in this batch
row_offsets_.resize(total_rows - skip_rows);
CUDA_TRY(cudaMemcpyAsync(row_ctx.device_ptr(),
row_ctx.host_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyHostToDevice,
stream));
// Pass 2: Output row offsets
cudf::io::csv::gpu::gather_row_offsets(opts,
row_ctx.device_ptr(),
row_offsets_,
data_,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
// With byte range, we want to keep only one row out of the specified range
if (range_end < data.size()) {
CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
size_t rows_out_of_range = 0;
for (uint32_t i = 0; i < num_blocks; i++) { rows_out_of_range += row_ctx[i]; }
if (rows_out_of_range != 0) {
// Keep one row out of range (used to infer length of previous row)
auto new_row_offsets_size =
row_offsets_.size() - std::min(rows_out_of_range - 1, row_offsets_.size());
row_offsets_.resize(new_row_offsets_size);
// Implies we reached the end of the range
break;
}
}
// num_rows does not include blank rows
if (num_rows >= 0) {
if (row_offsets_.size() > header_rows + static_cast<size_t>(num_rows)) {
size_t num_blanks =
cudf::io::csv::gpu::count_blank_rows(opts, data_, row_offsets_, stream);
if (row_offsets_.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
// Got the desired number of rows
break;
}
}
}
} else {
// Discard data (all rows below skip_rows), keeping one character for history
size_t discard_bytes = std::max(data_.size(), sizeof(char)) - sizeof(char);
if (discard_bytes != 0) {
data_.erase(data_.begin(), data_.begin() + discard_bytes);
buffer_pos += discard_bytes;
}
}
pos = target_pos;
} while (pos < data.size());
// Eliminate blank rows
if (row_offsets_.size() != 0) {
cudf::io::csv::gpu::remove_blank_rows(opts, data_, row_offsets_, stream);
}
// Remove header rows and extract header
const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
if (header_row_index + 1 < row_offsets_.size()) {
CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_offsets_.data().get() + header_row_index,
2 * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
const auto header_start = buffer_pos + row_ctx[0];
const auto header_end = buffer_pos + row_ctx[1];
CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
"Invalid csv header location");
header_.assign(data.begin() + header_start, data.begin() + header_end);
if (header_rows > 0) {
row_offsets_.erase(row_offsets_.begin(), row_offsets_.begin() + header_rows);
}
}
// Apply num_rows limit
if (num_rows >= 0) { row_offsets_.resize(std::min<size_t>(row_offsets_.size(), num_rows + 1)); }
}
/**
 * @brief Determine the data type of each active column.
 *
 * If the user did not specify dtypes, types are inferred on the GPU from the
 * column contents using PANDAS-compatible promotion rules. Otherwise the
 * user-provided dtypes are applied: a single dtype for all columns, a
 * positional list, or a "name:dtype" map. Timestamp types may be overridden
 * by the user-requested timestamp type, and EMPTY resolves to STRING.
 *
 * @param stream CUDA stream for device work
 * @return Per-active-column data types, in column order
 */
std::vector<data_type> reader::impl::gather_column_types(cudaStream_t stream)
{
std::vector<data_type> dtypes;
if (opts_.get_dtypes().empty()) {
if (num_records_ == 0) {
// No data rows: placeholder EMPTY types (replaced with STRING below)
dtypes.resize(num_active_cols_, data_type{type_id::EMPTY});
} else {
d_column_flags_ = h_column_flags_;
auto column_stats = cudf::io::csv::gpu::detect_column_types(
opts, data_, d_column_flags_, row_offsets_, num_active_cols_, stream);
CUDA_TRY(cudaStreamSynchronize(stream));
for (int col = 0; col < num_active_cols_; col++) {
unsigned long long countInt = column_stats[col].countInt8 + column_stats[col].countInt16 +
column_stats[col].countInt32 + column_stats[col].countInt64;
if (column_stats[col].countNULL == num_records_) {
// Entire column is NULL; allocate the smallest amount of memory
dtypes.emplace_back(cudf::type_id::INT8);
} else if (column_stats[col].countString > 0L) {
dtypes.emplace_back(cudf::type_id::STRING);
} else if (column_stats[col].countDateAndTime > 0L) {
dtypes.emplace_back(cudf::type_id::TIMESTAMP_NANOSECONDS);
} else if (column_stats[col].countBool > 0L) {
dtypes.emplace_back(cudf::type_id::BOOL8);
} else if (column_stats[col].countFloat > 0L ||
(column_stats[col].countFloat == 0L && countInt > 0L &&
column_stats[col].countNULL > 0L)) {
// The second condition has been added to conform to
// PANDAS which states that a column of integers with
// a single NULL record need to be treated as floats.
dtypes.emplace_back(cudf::type_id::FLOAT64);
} else {
// All other integers are stored as 64-bit to conform to PANDAS
dtypes.emplace_back(cudf::type_id::INT64);
}
}
}
} else {
// Dtypes are a "name:dtype" map only when every entry contains a colon
const bool is_dict =
std::all_of(opts_.get_dtypes().begin(), opts_.get_dtypes().end(), [](const auto &s) {
return s.find(':') != std::string::npos;
});
if (!is_dict) {
if (opts_.get_dtypes().size() == 1) {
// If it's a single dtype, assign that dtype to all active columns
data_type dtype_;
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[0]);
dtypes.resize(num_active_cols_, dtype_);
for (int col = 0; col < num_actual_cols_; col++) { h_column_flags_[col] |= col_flags_; }
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
} else {
// If it's a list, assign dtypes to active columns in the given order
CUDF_EXPECTS(static_cast<int>(opts_.get_dtypes().size()) >= num_actual_cols_,
"Must specify data types for all columns");
// NOTE: dtype_ is a back_insert_iterator; assigning through std::tie
// appends the parsed data_type onto `dtypes`
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols_; col++) {
if (h_column_flags_[col] & column_parse::enabled) {
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[col]);
h_column_flags_[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
}
}
}
} else {
// Translate vector of `name : dtype` strings to map
// NOTE: Incoming pairs can be out-of-order from column names in dataset
std::unordered_map<std::string, std::string> col_type_map;
for (const auto &pair : opts_.get_dtypes()) {
const auto pos = pair.find_last_of(':');
const auto name = pair.substr(0, pos);
const auto dtype = pair.substr(pos + 1, pair.size());
col_type_map[name] = dtype;
}
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols_; col++) {
if (h_column_flags_[col] & column_parse::enabled) {
CUDF_EXPECTS(col_type_map.find(col_names_[col]) != col_type_map.end(),
"Must specify data types for all active columns");
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(col_type_map[col_names_[col]]);
h_column_flags_[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
}
}
}
}
// Apply the user-requested timestamp resolution to all timestamp columns
if (opts_.get_timestamp_type().id() != cudf::type_id::EMPTY) {
for (auto &type : dtypes) {
if (cudf::is_timestamp(type)) { type = opts_.get_timestamp_type(); }
}
}
for (size_t i = 0; i < dtypes.size(); i++) {
// Replace EMPTY dtype with STRING
if (dtypes[i].id() == type_id::EMPTY) { dtypes[i] = data_type{type_id::STRING}; }
}
return dtypes;
}
/**
 * @brief Decode the buffered csv text into typed device column buffers.
 *
 * Allocates one output buffer per active column (STRING columns use a
 * temporary memory resource, since their final column is assembled from the
 * buffer later), then launches a single device-side decode across all
 * rows and columns.
 *
 * @param column_types Types of the active columns, in column order
 * @param stream CUDA stream for device work
 * @return Buffers holding the decoded data, one per active column
 */
std::vector<column_buffer> reader::impl::decode_data(std::vector<data_type> const &column_types,
cudaStream_t stream)
{
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (int col = 0, active_col = 0; col < num_actual_cols_; ++col) {
if (h_column_flags_[col] & column_parse::enabled) {
const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
auto out_buffer =
column_buffer(column_types[active_col],
num_records_,
true,
stream,
is_final_allocation ? mr_ : rmm::mr::get_current_device_resource());
out_buffer.name = col_names_[col];
out_buffers.emplace_back(std::move(out_buffer));
active_col++;
}
}
// Gather per-column data/validity pointers and ship them to the device
thrust::host_vector<void *> h_data(num_active_cols_);
thrust::host_vector<bitmask_type *> h_valid(num_active_cols_);
for (int i = 0; i < num_active_cols_; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
rmm::device_vector<data_type> d_dtypes(column_types);
rmm::device_vector<void *> d_data = h_data;
rmm::device_vector<bitmask_type *> d_valid = h_valid;
d_column_flags_ = h_column_flags_;
cudf::io::csv::gpu::decode_row_column_data(
opts, data_, d_column_flags_, row_offsets_, d_dtypes, d_data, d_valid, stream);
CUDA_TRY(cudaStreamSynchronize(stream));
// Defer null-count computation (UNKNOWN_NULL_COUNT sentinel)
for (int i = 0; i < num_active_cols_; ++i) { out_buffers[i].null_count() = UNKNOWN_NULL_COUNT; }
return out_buffers;
}
/**
 * @brief Constructor: translates the user-facing csv_reader_options into the
 * low-level parsing options (opts), infers the compression type from the file
 * extension, and builds serialized tries for user-defined true/false/NA
 * value strings.
 */
reader::impl::impl(std::unique_ptr<datasource> source,
std::string filepath,
csv_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: mr_(mr), source_(std::move(source)), filepath_(filepath), opts_(options)
{
num_actual_cols_ = opts_.get_names().size();
num_active_cols_ = num_actual_cols_;
// Whitespace-delimited mode: delimiter is a space with multi-char runs enabled
if (opts_.is_enabled_delim_whitespace()) {
opts.delimiter = ' ';
opts.multi_delimiter = true;
} else {
opts.delimiter = opts_.get_delimiter();
opts.multi_delimiter = false;
}
opts.terminator = opts_.get_lineterminator();
if (opts_.get_quotechar() != '\0' && opts_.get_quoting() != quote_style::NONE) {
opts.quotechar = opts_.get_quotechar();
opts.keepquotes = false;
opts.doublequote = opts_.is_enabled_doublequote();
} else {
// Quoting disabled: quote characters are kept as regular data
opts.quotechar = '\0';
opts.keepquotes = true;
opts.doublequote = false;
}
opts.skipblanklines = opts_.is_enabled_skip_blank_lines();
opts.comment = opts_.get_comment();
opts.dayfirst = opts_.is_enabled_dayfirst();
opts.decimal = opts_.get_decimal();
opts.thousands = opts_.get_thousands();
CUDF_EXPECTS(opts.decimal != opts.delimiter, "Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(opts.thousands != opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
compression_type_ =
infer_compression_type(opts_.get_compression(),
filepath,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (opts_.get_true_values().size() != 0) {
d_trie_true_ = createSerializedTrie(opts_.get_true_values());
opts.trueValuesTrie = d_trie_true_.data().get();
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (opts_.get_false_values().size() != 0) {
d_trie_false_ = createSerializedTrie(opts_.get_false_values());
opts.falseValuesTrie = d_trie_false_.data().get();
}
// Handle user-defined N/A values, whereby field data is treated as null
if (opts_.get_na_values().size() != 0) {
d_trie_na_ = createSerializedTrie(opts_.get_na_values());
opts.naValuesTrie = d_trie_na_.data().get();
}
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
csv_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
// The datasource is created lazily at read() time so that byte ranges can be
// used to partially memory-map the file.
_impl = std::make_unique<impl>(nullptr, filepaths.front(), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
csv_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
// Take ownership of the provided datasource; no file path is involved here.
_impl = std::make_unique<impl>(std::move(sources.front()), "", options, mr);
}
// Destructor defaulted within this translation unit, where impl is a
// complete type
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(cudaStream_t stream) { return _impl->read(stream); }
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
|
5e2486e17c43d7f5eec5dc446a53a446b5293bad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise divide of `mat` by values from `vec`, writing into `tgtMat`.
// `vec` is indexed with i / height, i.e. one divisor per run of `height`
// consecutive elements — this implies column-major storage where `vec`
// holds one value per column (consistent with the "row vector" name);
// TODO confirm layout against callers.
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
// Grid-stride loop: flat global thread id plus total launched thread count
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
} | 5e2486e17c43d7f5eec5dc446a53a446b5293bad.cu | #include "includes.h"
// Element-wise divide of `mat` by values from `vec`, writing into `tgtMat`.
// One divisor per run of `height` consecutive elements (per column if `mat`
// is column-major) — TODO confirm layout against callers.
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
// Grid-stride loop over all width*height elements
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
} |
6b2f69632f204893795c7f272c03c42b83216f35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Monte Carlo pi estimation: each thread samples TRIALS_PER_THREAD uniform
// points in the unit square and writes its own pi estimate to estimate[tid].
// TRIALS_PER_THREAD is presumably defined in includes.h — TODO confirm.
__global__ void gpu_monte_carlo_curand_d(double *estimate, hiprandState_t *states) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
int points_in_circle = 0;
double x, y;
// Fixed seed (1234) with per-thread sequence id => deterministic per-thread streams
hiprand_init(1234, tid, 0, &states[tid]); // Initialize CURAND
for(int i = 0; i < TRIALS_PER_THREAD; i++) {
x = hiprand_uniform (&states[tid]);
y = hiprand_uniform (&states[tid]);
points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle.
}
estimate[tid] = 4.0f * points_in_circle / (double) TRIALS_PER_THREAD; // return estimate of pi
} | 6b2f69632f204893795c7f272c03c42b83216f35.cu | #include "includes.h"
// Monte Carlo pi estimation: each thread samples TRIALS_PER_THREAD uniform
// points in the unit square and writes its own pi estimate to estimate[tid].
// TRIALS_PER_THREAD is presumably defined in includes.h — TODO confirm.
__global__ void gpu_monte_carlo_curand_d(double *estimate, curandState *states) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
int points_in_circle = 0;
double x, y;
// Fixed seed (1234) with per-thread sequence id => deterministic per-thread streams
curand_init(1234, tid, 0, &states[tid]); // Initialize CURAND
for(int i = 0; i < TRIALS_PER_THREAD; i++) {
x = curand_uniform (&states[tid]);
y = curand_uniform (&states[tid]);
points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle.
}
estimate[tid] = 4.0f * points_in_circle / (double) TRIALS_PER_THREAD; // return estimate of pi
} |
849bca024717a8f69290d2b6e7473a6da5231fd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <random>
#include "common.h"
#include "multilgKernels.h"
#include "transformerKernels.h"
/**
@file
Implemented the cuda kernel function and its launcher
that required by multilingual nmt model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: ker_multilg_enc_emb
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, batch_seq_len]
output: result, [batch_size, batch_seq_len, hidden_size]
padding_mask: record the padding token, [batch_size, batch_seq_len]
padding_id, the padding token id
*/
template <typename T>
__global__ void ker_multilg_enc_emb(const T* token_emb, const T* pos_emb,
const T* src_lang_emb,
const int* token_id, T* output,
int* padding_mask, int padding_id,
const int hidden_size) {
// blockIdx.x = batch row, blockIdx.y = sequence position (gridDim.y = batch_seq_len)
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
// Threads stride over the hidden dimension of this token's embedding
int start = target_pos * hidden_size + threadIdx.x;
int end = (target_pos + 1) * hidden_size;
int tid = token_id[target_pos];
// First token of the source sequence carries the language id
// (source layout is [src_lang_id, trg_lang_id, tokens...])
int lang_id = token_id[blockIdx.x * gridDim.y];
if (tid == padding_id) {
// for padding id: flag the position and zero its embedding
if (threadIdx.x == 0) padding_mask[target_pos] = 1;
for (uint i = start; i < end; i += blockDim.x) {
// output[target_pos * blockDim.x + threadIdx.x] = 0.f;
output[i] = 0.f;
}
return;
}
if (threadIdx.x == 0) {
padding_mask[target_pos] = 0;
}
// output = token embedding + position embedding + source-language embedding
for (uint i = start; i < end; i += blockDim.x) {
int offset = i - target_pos * hidden_size;
output[i] = token_emb[tid * hidden_size + offset] +
pos_emb[blockIdx.y * hidden_size + offset] +
src_lang_emb[lang_id * hidden_size + offset];
}
}
// fp16 specialization: hidden units are processed two at a time as half2;
// half_hidden_size == hidden_size / 2 (the launcher passes hidden_size / 2)
template <>
__global__ void ker_multilg_enc_emb<__half>(const __half* token_emb,
const __half* pos_emb,
const __half* src_lang_emb,
const int* token_id, __half* output,
int* padding_mask, int padding_id,
const int half_hidden_size) {
int target_pos = blockIdx.x * gridDim.y + blockIdx.y;
int start = target_pos * half_hidden_size + threadIdx.x;
int end = (target_pos + 1) * half_hidden_size;
int tid = token_id[target_pos];
// First token of the source sequence carries the language id
int lang_id = token_id[blockIdx.x * gridDim.y];
half2* output_h = (half2*)output;
if (tid == padding_id) {
// for padding id: flag the position and zero its embedding
if (threadIdx.x == 0) padding_mask[target_pos] = 1;
for (uint i = start; i < end; i += blockDim.x) {
output_h[i] = __float2half2_rn(0.f);
}
return;
}
if (threadIdx.x == 0) {
padding_mask[target_pos] = 0;
}
// Sum token/position/language embeddings in fp32, then round back to half2
for (uint i = start; i < end; i += blockDim.x) {
int offset = i - target_pos * half_hidden_size;
float2 te = __half22float2(
((const half2*)token_emb)[tid * half_hidden_size + offset]);
float2 pe = __half22float2(
((const half2*)pos_emb)[blockIdx.y * half_hidden_size + offset]);
float2 le = __half22float2(
((const half2*)src_lang_emb)[lang_id * half_hidden_size + offset]);
te.x = te.x + pe.x + le.x;
te.y = te.y + pe.y + le.y;
output_h[i] = __float22half2_rn(te);
}
}
// Host-side launcher for ker_multilg_enc_emb.
// Launch config: grid (batch_size, batch_seq_len), block of max_thread_per_block.
template <typename T>
void ker_multilg_enc_emb_launcher(int batch_size, int batch_seq_len,
int hidden_size, hipStream_t stream,
const T* token_emb, const T* pos_emb,
const T* src_lang_emb,
const int* token_id, T* output,
int* padding_mask, int padding_id,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_multilg_enc_emb<T>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(max_thread_per_block), 0, stream,
token_emb, pos_emb, src_lang_emb, token_id, output, padding_mask, padding_id,
hidden_size);
}
// fp16 launcher: passes hidden_size / 2 since the kernel walks half2 pairs
// (hidden_size is expected to be even)
template <>
void ker_multilg_enc_emb_launcher<__half>(int batch_size, int batch_seq_len,
int hidden_size, hipStream_t stream,
const __half* token_emb,
const __half* pos_emb,
const __half* src_lang_emb,
const int* token_id, __half* output,
int* padding_mask, int padding_id,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_multilg_enc_emb<__half>)
, dim3(dim3(batch_size, batch_seq_len)), dim3(max_thread_per_block), 0, stream,
token_emb, pos_emb, src_lang_emb, token_id, output, padding_mask, padding_id,
hidden_size / 2);
}
// Explicit template instantiations for fp32 and fp16
template void ker_multilg_enc_emb_launcher<float>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const float* token_emb, const float* pos_emb, const float* src_lang_emb,
const int* token_id, float* output, int* padding_mask, int padding_id,
int max_thread_per_block);
template void ker_multilg_enc_emb_launcher<__half>(
int batch_size, int batch_seq_len, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb, const __half* src_lang_emb,
const int* token_id, __half* output, int* padding_mask, int padding_id,
int max_thread_per_block);
/**
@brief: ker_multilg_dec_emb
for multilingual decoder, look up token embedding, add position embedding
and lang embedding
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
token_emb: [hidden_size, vocab_size], note, it is different with encoder
pos_emb: [max_step, hidden_size]
src_lang_emb: [lang_num, hidden_size]
trg_lang_emb: [lang_num, hidden_size]
src_token_id: [batch_size, src_seq_len]
token_id: input token id, [batch_size, beam_size, max_step]
output: result, [batch_size, beam_size, hidden_size]
step: current step
max_step: max decoder steps
vocab_size: vocabulary size
*/
template <typename T>
__global__ void ker_multilg_dec_emb(const T* token_emb, const T* pos_emb,
const T* src_lang_emb, const T* trg_lang_emb,
const int* src_token_id,
const int* token_id, T* output, int step,
int max_step, int vocab_size,
int hidden_size, int beam_size, int src_seq_len) {
// One block per (batch, beam) pair
int batch_id = blockIdx.x / beam_size;
// src seq is in [src_lang_id, trg_lang_id, tokens...] format
int src_lang_id = src_token_id[batch_id * src_seq_len];
int trg_lang_id = src_token_id[batch_id * src_seq_len + 1];
// At step 0, decoding is seeded with the target-language id token
int token_idx = (step == 0 ? trg_lang_id : token_id[blockIdx.x * max_step + step]);
for (uint offset = threadIdx.x; offset < hidden_size; offset += blockDim.x) {
// token_emb is [hidden_size, vocab_size] (transposed vs. the encoder's),
// hence the offset * vocab_size + token_idx indexing
output[blockIdx.x * hidden_size + offset] =
token_emb[offset * vocab_size + token_idx] +
pos_emb[step * hidden_size + offset] +
src_lang_emb[src_lang_id * hidden_size + offset] +
trg_lang_emb[trg_lang_id * hidden_size + offset];
}
}
// Host-side launcher for ker_multilg_dec_emb.
// Launch config: one block per decoding token (step_token_num blocks,
// presumably batch_size * beam_size — TODO confirm at call site), block of
// max_thread_per_block threads.
template <typename T>
void ker_multilg_dec_emb_launcher(int step_token_num, int hidden_size,
hipStream_t stream, const T* token_emb,
const T* pos_emb, const T* src_lang_emb,
const T* trg_lang_emb, const int* src_token_id,
const int* token_id, T* output, int step, int max_step,
int vocab_size, int beam_size, int src_seq_len,
int max_thread_per_block) {
hipLaunchKernelGGL(( ker_multilg_dec_emb<T>), dim3(step_token_num), dim3(max_thread_per_block), 0, stream,
token_emb, pos_emb, src_lang_emb, trg_lang_emb, src_token_id,
token_id, output, step, max_step, vocab_size,
hidden_size, beam_size, src_seq_len);
}
// Explicit template instantiations for fp32 and fp16
template void ker_multilg_dec_emb_launcher<float>(
int step_token_num, int hidden_size, hipStream_t stream,
const float* token_emb, const float* pos_emb,
const float* src_lang_emb, const float* trg_lang_emb,
const int* src_token_id, const int* token_id,
float* output, int step, int max_step,
int vocab_size, int beam_size, int src_seq_len, int max_thread_per_block);
template void ker_multilg_dec_emb_launcher<__half>(
int step_token_num, int hidden_size, hipStream_t stream,
const __half* token_emb, const __half* pos_emb,
const __half* src_lang_emb, const __half* trg_lang_emb,
const int* src_token_id, const int* token_id,
__half* output, int step, int max_step,
int vocab_size, int beam_size, int src_seq_len, int max_thread_per_block);
/**
@brief: select_beam_rough_topk_multilg
one block for one beam, compute the log seq probability ended with every token
in
vocab, base on the previous log seq probability and current step's logit, select
rough topK candidate.
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, beam_size, vocab_size], cur step logit
logit_bias: [vocab_size], logit bias
seq_probs: [batch_size, beam_size], prefix sequence log probability
seq_score: [batch_size, beam_size], prefix sequence score
alive_seq: [batch_size, beam_size, max_step], prefix sequence id
can_idx: [batch_size, beam_size, vocab_size], topk candidate's index
can_score: [batch_size, beam_size, vocab_size], topk candidate's score
num_beam_can: [1 + batch_size * beam_size].
the first ele save the number of topk candidate of the whole batch
the remaining batch_size * beam_size ele save the number of topk candidate
of each beam
vocab_size: the vocab size of decoder
max_step: max decode step
length_norm: length penlty value for current step
cur_step: current step
diverse_lambda: lambda for diverse beam search
*/
template <typename T, int beam_size>
__global__ void select_beam_rough_topk_multilg(
const T* logits, const T* logit_bias, const float* seq_probs,
const float* seq_score, const int* alive_seq,
const int* vocab_mask, const int* src_token_id, int* can_idx,
float* can_score, int* num_beam_can, int vocab_size, int max_step,
float length_norm, int cur_step,
float diverse_lambda, int end_id, int src_seq_len) {
if (alive_seq[blockIdx.x * max_step + cur_step] == end_id) {
// this is a finished beam: it contributes exactly one candidate (EOS)
// whose score is already final
if (threadIdx.x == 0) {
num_beam_can[blockIdx.x + 1] = 1; // generate one candidate
int pos = atomicAdd(num_beam_can, 1); // get a candidate pos
if (diverse_lambda == 0) {
can_score[pos] =
seq_score[blockIdx.x]; // this beam's score will not be change
} else {
// add the beam id offset in score to sort in each beam
int batch_id = blockIdx.x / beam_size;
can_score[pos] = seq_score[blockIdx.x] +
(blockIdx.x - batch_id) * min_log_probability;
}
can_idx[pos] = end_id + (blockIdx.x % beam_size) * vocab_size; // EOS
}
return;
}
/* step1: compute each thread's max_logit and sum_exp_logit, store in
 * rough_top_kth_logit, sum_exp_logit */
int batch_id = blockIdx.x / beam_size;
// src seq layout is [src_lang_id, trg_lang_id, tokens...]
int trg_lang_id = src_token_id[batch_id * src_seq_len + 1];
const int block_start = blockIdx.x * vocab_size;
const int left_idx = block_start + threadIdx.x;
const int right_idx = (blockIdx.x + 1) * vocab_size;
float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
float sum_exp_logit = 0;
for (int i = left_idx; i < right_idx; i += blockDim.x) {
// vocab_mask zeroes out tokens that are not valid for the target language
int lang_mask = vocab_mask[trg_lang_id * vocab_size + i - block_start];
float lgt = (lang_mask == 0 ? CUDA_FLOAT_INF_NEG :
(float)logits[i] + (float)__ldg(&logit_bias[i - block_start]));
rough_top_kth_logit = fmaxf(rough_top_kth_logit, lgt);
}
float max_logit = blockReduceMax(rough_top_kth_logit);
__shared__ float s_max_logit;
if (threadIdx.x == 0) {
s_max_logit = max_logit;
}
__syncthreads();
// Second sweep: accumulate exp(logit - max) for the softmax denominator,
// skipping masked-out tokens
for (int i = left_idx; i < right_idx; i += blockDim.x) {
int lang_mask = vocab_mask[trg_lang_id * vocab_size + i - block_start];
float lgt = lang_mask == 0 ? 0.f :
expf(fmaxf(
(float)(logits[i]) + (float)__ldg(&logit_bias[i - block_start]) -
s_max_logit, logit_thresh_min));
sum_exp_logit += lgt;
}
/*
step2: compute rough top-kth-logits and sum_exp_logit among the whole beam,
saved into s_topk and
s_log_prob_base
*/
__shared__ float
s_log_prob_base; // prefix sequence log prob - log_sum_exp_logit
__shared__ float s_topk; // rough top k-th value of logits
__shared__ int num_cur_beam_can; // candidate number for this beam
sum_exp_logit = blockReduceSum(sum_exp_logit);
rough_top_kth_logit = blockRoughTopK<float, beam_size>(rough_top_kth_logit);
if (threadIdx.x == 0) {
s_log_prob_base = seq_probs[blockIdx.x] - logf(sum_exp_logit) - s_max_logit;
s_topk = rough_top_kth_logit;
num_cur_beam_can = 0;
}
/*
step3 : select the candidate token with logits bigger than s_topk,
compute the seq probability ended with them,
save the probability, token_index, selected token number.
*/
int idx = left_idx;
int batch_start_pos = batch_id * beam_size * vocab_size;
// int unk_vocab_id = vocab_size - 3; // last three element: unk, start, eos
__shared__ int l_n; // current iteration candidate number
for (int iter = 0; iter < (vocab_size + blockDim.x - 1) / blockDim.x;
iter++) {
// zero the counter
if (threadIdx.x == 0) l_n = 0;
__syncthreads();
float lgt = CUDA_FLOAT_INF_NEG - 1.f; // min s_topk is CUDA_FLOAT_INF_NEG
int pos;
int vocab_id = idx - block_start;
// if ((vocab_id < vocab_size) && (vocab_id != unk_vocab_id)) {
if (vocab_id < vocab_size) {
int lang_mask = vocab_mask[trg_lang_id * vocab_size + vocab_id];
if (lang_mask != 0) {
lgt = (float)(logits[idx]) + (float)__ldg(&logit_bias[vocab_id]);
if (lgt >= s_topk)
// pos: relative pos inside this iteration
pos = atomicAdd(&l_n, 1);
}
}
__syncthreads();
// leader increments the global counter
if (threadIdx.x == 0) {
atomicAdd(&num_cur_beam_can, l_n);
l_n = atomicAdd(num_beam_can, l_n);
}
__syncthreads();
// threads with true predicates write their elements
if ((lgt >= s_topk)) {
pos += l_n; // increment local pos by global counter
if (diverse_lambda == 0) {
can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
min_log_probability + 1.f) +
batch_id * min_log_probability;
} else {
can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
min_log_probability + 1.f) +
blockIdx.x * min_log_probability;
}
can_idx[pos] = idx - batch_start_pos;
}
__syncthreads();
idx += blockDim.x;
}
if (threadIdx.x == 0) {
num_beam_can[blockIdx.x + 1] = num_cur_beam_can;
}
}
/**
Host-side dispatcher: launches the select_beam_rough_topk_multilg kernel
instantiation whose compile-time beam width matches the runtime beam_size.
Grid = step_token_num blocks (one per alive beam), block = max_thread_per_block.
Unsupported beam sizes launch nothing, exactly as before.
*/
template <typename T>
void select_beam_rough_topk_multilg_launcher(
    const T* logits, const T* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id,
    int* can_idx, float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step, int step_token_num,
    int max_thread_per_block, hipStream_t stream, int beam_size,
    float diverse_lambda, int end_id, int src_seq_len) {
#define LS_LAUNCH_ROUGH_TOPK_MULTILG(BS)                                    \
  hipLaunchKernelGGL((select_beam_rough_topk_multilg<T, BS>),               \
                     dim3(step_token_num), dim3(max_thread_per_block), 0,   \
                     stream, logits, logit_bias, seq_probs, seq_score,      \
                     alive_seq, vocab_mask, src_token_id, can_idx,          \
                     can_score, num_beam_can, vocab_size, max_step,         \
                     length_norm, cur_step, diverse_lambda, end_id,         \
                     src_seq_len)
  switch (beam_size) {
    case 1:  LS_LAUNCH_ROUGH_TOPK_MULTILG(1);  break;
    case 2:  LS_LAUNCH_ROUGH_TOPK_MULTILG(2);  break;
    case 4:  LS_LAUNCH_ROUGH_TOPK_MULTILG(4);  break;
    case 8:  LS_LAUNCH_ROUGH_TOPK_MULTILG(8);  break;
    case 16: LS_LAUNCH_ROUGH_TOPK_MULTILG(16); break;
    case 32: LS_LAUNCH_ROUGH_TOPK_MULTILG(32); break;
    default: break;  // no kernel instantiation for other beam sizes
  }
#undef LS_LAUNCH_ROUGH_TOPK_MULTILG
}
// Explicit instantiation for fp32 logits.
template void select_beam_rough_topk_multilg_launcher<float>(
    const float* logits, const float* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id,
    int* can_idx, float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step, int step_token_num,
    int max_thread_per_block, hipStream_t stream, int beam_size,
    float diverse_lambda, int end_id, int src_seq_len);
// Explicit instantiation for fp16 logits.
template void select_beam_rough_topk_multilg_launcher<__half>(
    const __half* logits, const __half* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id,
    int* can_idx, float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step, int step_token_num,
    int max_thread_per_block, hipStream_t stream, int beam_size,
    float diverse_lambda, int end_id, int src_seq_len);
} // namespace cuda
} // namespace lightseq
| 849bca024717a8f69290d2b6e7473a6da5231fd7.cu | #include <random>
#include "common.h"
#include "multilgKernels.h"
#include "transformerKernels.h"
/**
@file
Implements the CUDA kernel functions and their launchers
required by the multilingual NMT model.
Currently, fp16 and fp32 versions are provided
*/
namespace lightseq {
namespace cuda {
/**
@brief: ker_multilg_enc_emb
for encoder, look up token embedding, add position embedding
@thread
gridDim.x = batch_size
gridDim.y = batch_seq_len
blockDim.x = max_thread_per_block
@param
token_emb: [vocab_size, hidden_size]
pos_emb: [max_step, hidden_size]
token_id: input token id, [batch_size, batch_seq_len]
output: result, [batch_size, batch_seq_len, hidden_size]
padding_mask: record the padding token, [batch_size, batch_seq_len]
padding_id, the padding token id
*/
/**
Encoder embedding lookup: token embedding + positional embedding +
source-language embedding, with padding positions zeroed and flagged.
One block per (sentence, sequence position); threads stride the hidden dim.
*/
template <typename T>
__global__ void ker_multilg_enc_emb(const T* token_emb, const T* pos_emb,
                                    const T* src_lang_emb,
                                    const int* token_id, T* output,
                                    int* padding_mask, int padding_id,
                                    const int hidden_size) {
  const int seq_pos = blockIdx.x * gridDim.y + blockIdx.y;
  const int cur_token = token_id[seq_pos];
  // The first token of each source sentence is used as the language-embedding
  // index (src seq layout starts with the language tag).
  const int lang_id = token_id[blockIdx.x * gridDim.y];
  const int base = seq_pos * hidden_size;
  if (cur_token == padding_id) {
    // Padding position: raise the mask bit and write zeros.
    if (threadIdx.x == 0) padding_mask[seq_pos] = 1;
    for (uint offset = threadIdx.x; offset < hidden_size; offset += blockDim.x) {
      output[base + offset] = 0.f;
    }
    return;
  }
  if (threadIdx.x == 0) {
    padding_mask[seq_pos] = 0;
  }
  for (uint offset = threadIdx.x; offset < hidden_size; offset += blockDim.x) {
    output[base + offset] = token_emb[cur_token * hidden_size + offset] +
                            pos_emb[blockIdx.y * hidden_size + offset] +
                            src_lang_emb[lang_id * hidden_size + offset];
  }
}
/**
fp16 specialization of the encoder embedding lookup. The hidden dimension is
processed as half2 pairs (half_hidden_size = hidden_size / 2); the three
embeddings are summed in fp32 and rounded back to half2.
*/
template <>
__global__ void ker_multilg_enc_emb<__half>(const __half* token_emb,
                                            const __half* pos_emb,
                                            const __half* src_lang_emb,
                                            const int* token_id, __half* output,
                                            int* padding_mask, int padding_id,
                                            const int half_hidden_size) {
  const int seq_pos = blockIdx.x * gridDim.y + blockIdx.y;
  const int cur_token = token_id[seq_pos];
  // First token of the sentence carries the language-embedding index.
  const int lang_id = token_id[blockIdx.x * gridDim.y];
  const int base = seq_pos * half_hidden_size;
  half2* out2 = (half2*)output;
  const half2* tok2 = (const half2*)token_emb;
  const half2* pos2 = (const half2*)pos_emb;
  const half2* lang2 = (const half2*)src_lang_emb;
  if (cur_token == padding_id) {
    // Padding position: raise the mask bit and write zeros.
    if (threadIdx.x == 0) padding_mask[seq_pos] = 1;
    for (uint offset = threadIdx.x; offset < half_hidden_size; offset += blockDim.x) {
      out2[base + offset] = __float2half2_rn(0.f);
    }
    return;
  }
  if (threadIdx.x == 0) {
    padding_mask[seq_pos] = 0;
  }
  for (uint offset = threadIdx.x; offset < half_hidden_size; offset += blockDim.x) {
    // Accumulate in fp32, then round once back to half2.
    float2 sum = __half22float2(tok2[cur_token * half_hidden_size + offset]);
    const float2 pe = __half22float2(pos2[blockIdx.y * half_hidden_size + offset]);
    const float2 le = __half22float2(lang2[lang_id * half_hidden_size + offset]);
    sum.x = sum.x + pe.x + le.x;
    sum.y = sum.y + pe.y + le.y;
    out2[base + offset] = __float22half2_rn(sum);
  }
}
/**
Launches ker_multilg_enc_emb. Grid is (batch_size, batch_seq_len) — one block
per token position; block size is max_thread_per_block.
*/
template <typename T>
void ker_multilg_enc_emb_launcher(int batch_size, int batch_seq_len,
                                  int hidden_size, cudaStream_t stream,
                                  const T* token_emb, const T* pos_emb,
                                  const T* src_lang_emb,
                                  const int* token_id, T* output,
                                  int* padding_mask, int padding_id,
                                  int max_thread_per_block) {
  const dim3 grid(batch_size, batch_seq_len);
  ker_multilg_enc_emb<T><<<grid, max_thread_per_block, 0, stream>>>(
      token_emb, pos_emb, src_lang_emb, token_id, output, padding_mask,
      padding_id, hidden_size);
}
/**
fp16 launcher: the half kernel consumes the hidden dimension in half2 units,
so it receives hidden_size / 2.
*/
template <>
void ker_multilg_enc_emb_launcher<__half>(int batch_size, int batch_seq_len,
                                          int hidden_size, cudaStream_t stream,
                                          const __half* token_emb,
                                          const __half* pos_emb,
                                          const __half* src_lang_emb,
                                          const int* token_id, __half* output,
                                          int* padding_mask, int padding_id,
                                          int max_thread_per_block) {
  const dim3 grid(batch_size, batch_seq_len);
  ker_multilg_enc_emb<__half><<<grid, max_thread_per_block, 0, stream>>>(
      token_emb, pos_emb, src_lang_emb, token_id, output, padding_mask,
      padding_id, hidden_size / 2);
}
// Explicit instantiation for fp32 embeddings.
template void ker_multilg_enc_emb_launcher<float>(
    int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
    const float* token_emb, const float* pos_emb, const float* src_lang_emb,
    const int* token_id, float* output, int* padding_mask, int padding_id,
    int max_thread_per_block);
// Explicit instantiation for fp16 embeddings.
template void ker_multilg_enc_emb_launcher<__half>(
    int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream,
    const __half* token_emb, const __half* pos_emb, const __half* src_lang_emb,
    const int* token_id, __half* output, int* padding_mask, int padding_id,
    int max_thread_per_block);
/**
@brief: ker_multilg_dec_emb
for multilingual decoder, look up token embedding, add position embedding
and lang embedding
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
token_emb: [hidden_size, vocab_size], note, it is different with encoder
pos_emb: [max_step, hidden_size]
src_lang_emb: [lang_num, hidden_size]
trg_lang_emb: [lang_num, hidden_size]
src_token_id: [batch_size, src_seq_len]
token_id: input token id, [batch_size, beam_size, max_step]
output: result, [batch_size, beam_size, hidden_size]
step: current step
max_step: max decoder steps
vocab_size: vocabulary size
*/
/**
Decoder embedding lookup for one decode step: token embedding (transposed
layout) + positional + source-language + target-language embeddings.
One block per alive (batch, beam) pair; threads stride the hidden dim.
*/
template <typename T>
__global__ void ker_multilg_dec_emb(const T* token_emb, const T* pos_emb,
                                    const T* src_lang_emb, const T* trg_lang_emb,
                                    const int* src_token_id,
                                    const int* token_id, T* output, int step,
                                    int max_step, int vocab_size,
                                    int hidden_size, int beam_size, int src_seq_len) {
  const int beam_global = blockIdx.x;
  const int batch_id = beam_global / beam_size;
  // src seq is in [src_lang_id, trg_lang_id, tokens...] format
  const int src_lang_id = src_token_id[batch_id * src_seq_len];
  const int trg_lang_id = src_token_id[batch_id * src_seq_len + 1];
  // Step 0 has no generated token yet: decoding starts from the
  // target-language tag.
  int cur_token;
  if (step == 0) {
    cur_token = trg_lang_id;
  } else {
    cur_token = token_id[beam_global * max_step + step];
  }
  for (uint offset = threadIdx.x; offset < hidden_size; offset += blockDim.x) {
    // Note: token_emb is [hidden_size, vocab_size] (transposed vs encoder).
    output[beam_global * hidden_size + offset] =
        token_emb[offset * vocab_size + cur_token] +
        pos_emb[step * hidden_size + offset] +
        src_lang_emb[src_lang_id * hidden_size + offset] +
        trg_lang_emb[trg_lang_id * hidden_size + offset];
  }
}
/**
Launches ker_multilg_dec_emb with one block per alive beam
(step_token_num blocks, per the kernel's documented grid layout).
*/
template <typename T>
void ker_multilg_dec_emb_launcher(int step_token_num, int hidden_size,
                                  cudaStream_t stream, const T* token_emb,
                                  const T* pos_emb, const T* src_lang_emb,
                                  const T* trg_lang_emb, const int* src_token_id,
                                  const int* token_id, T* output, int step, int max_step,
                                  int vocab_size, int beam_size, int src_seq_len,
                                  int max_thread_per_block) {
  const dim3 grid(step_token_num);
  ker_multilg_dec_emb<T><<<grid, max_thread_per_block, 0, stream>>>(
      token_emb, pos_emb, src_lang_emb, trg_lang_emb, src_token_id, token_id,
      output, step, max_step, vocab_size, hidden_size, beam_size, src_seq_len);
}
// Explicit instantiation for fp32 embeddings.
template void ker_multilg_dec_emb_launcher<float>(
    int step_token_num, int hidden_size, cudaStream_t stream,
    const float* token_emb, const float* pos_emb,
    const float* src_lang_emb, const float* trg_lang_emb,
    const int* src_token_id, const int* token_id,
    float* output, int step, int max_step,
    int vocab_size, int beam_size, int src_seq_len, int max_thread_per_block);
// Explicit instantiation for fp16 embeddings.
template void ker_multilg_dec_emb_launcher<__half>(
    int step_token_num, int hidden_size, cudaStream_t stream,
    const __half* token_emb, const __half* pos_emb,
    const __half* src_lang_emb, const __half* trg_lang_emb,
    const int* src_token_id, const int* token_id,
    __half* output, int step, int max_step,
    int vocab_size, int beam_size, int src_seq_len, int max_thread_per_block);
/**
@brief: select_beam_rough_topk_multilg
one block for one beam, compute the log seq probability ended with every token
in
vocab, base on the previous log seq probability and current step's logit, select
rough topK candidate.
@thread
gridDim.x = batch_size * beam_size
blockDim.x = max_thread_per_block
@param
logits: [batch_size, beam_size, vocab_size], cur step logit
logit_bias: [vocab_size], logit bias
seq_probs: [batch_size, beam_size], prefix sequence log probability
seq_score: [batch_size, beam_size], prefix sequence score
alive_seq: [batch_size, beam_size, max_step], prefix sequence id
can_idx: [batch_size, beam_size, vocab_size], topk candidate's index
can_score: [batch_size, beam_size, vocab_size], topk candidate's score
num_beam_can: [1 + batch_size * beam_size].
the first ele save the number of topk candidate of the whole batch
the remaining batch_size * beam_size ele save the number of topk candidate
of each beam
vocab_size: the vocab size of decoder
max_step: max decode step
length_norm: length penalty value for the current step
cur_step: current step
diverse_lambda: lambda for diverse beam search
*/
template <typename T, int beam_size>
__global__ void select_beam_rough_topk_multilg(
    const T* logits, const T* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id, int* can_idx,
    float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step,
    float diverse_lambda, int end_id, int src_seq_len) {
  // One thread block scores one (batch, beam) pair and emits its rough
  // top-k candidates into can_idx / can_score.
  if (alive_seq[blockIdx.x * max_step + cur_step] == end_id) {
    // this is a finished beam
    if (threadIdx.x == 0) {
      num_beam_can[blockIdx.x + 1] = 1;      // generate one candidate
      int pos = atomicAdd(num_beam_can, 1);  // get a candidate pos
      if (diverse_lambda == 0) {
        can_score[pos] =
            seq_score[blockIdx.x];  // this beam's score will not change
      } else {
        // add the beam id offset in score to sort in each beam
        int batch_id = blockIdx.x / beam_size;
        can_score[pos] = seq_score[blockIdx.x] +
                         (blockIdx.x - batch_id) * min_log_probability;
      }
      can_idx[pos] = end_id + (blockIdx.x % beam_size) * vocab_size;  // EOS
    }
    return;
  }
  /* step1: compute each thread's max_logit and sum_exp_logit, store in
   * rough_top_kth_logit, sum_exp_logit */
  int batch_id = blockIdx.x / beam_size;
  // The target-language id is the second token of the source sequence.
  int trg_lang_id = src_token_id[batch_id * src_seq_len + 1];
  const int block_start = blockIdx.x * vocab_size;
  const int left_idx = block_start + threadIdx.x;
  const int right_idx = (blockIdx.x + 1) * vocab_size;
  float rough_top_kth_logit = CUDA_FLOAT_INF_NEG;
  float sum_exp_logit = 0;
  // Tokens masked out for this target language are treated as -inf logits.
  for (int i = left_idx; i < right_idx; i += blockDim.x) {
    int lang_mask = vocab_mask[trg_lang_id * vocab_size + i - block_start];
    float lgt = (lang_mask == 0 ? CUDA_FLOAT_INF_NEG :
        (float)(logits[i]) + (float)__ldg(&logit_bias[i - block_start]));
    rough_top_kth_logit = fmaxf(rough_top_kth_logit, lgt);
  }
  float max_logit = blockReduceMax(rough_top_kth_logit);
  __shared__ float s_max_logit;
  if (threadIdx.x == 0) {
    s_max_logit = max_logit;
  }
  __syncthreads();
  // Accumulate exp(logit - max) for a numerically stable log-softmax;
  // masked tokens contribute 0.
  for (int i = left_idx; i < right_idx; i += blockDim.x) {
    int lang_mask = vocab_mask[trg_lang_id * vocab_size + i - block_start];
    float lgt = lang_mask == 0 ? 0.f :
        expf(fmaxf(
            (float)(logits[i]) + (float)__ldg(&logit_bias[i - block_start]) -
            s_max_logit, logit_thresh_min));
    sum_exp_logit += lgt;
  }
  /*
  step2: compute rough top-kth-logits and sum_exp_logit among the whole beam,
  saved into s_topk and
  s_log_prob_base
  */
  __shared__ float
      s_log_prob_base;      // prefix sequence log prob - log_sum_exp_logit
  __shared__ float s_topk;  // rough top k-th value of logits
  __shared__ int num_cur_beam_can;  // candidate number for this beam
  sum_exp_logit = blockReduceSum(sum_exp_logit);
  rough_top_kth_logit = blockRoughTopK<float, beam_size>(rough_top_kth_logit);
  if (threadIdx.x == 0) {
    s_log_prob_base = seq_probs[blockIdx.x] - logf(sum_exp_logit) - s_max_logit;
    s_topk = rough_top_kth_logit;
    num_cur_beam_can = 0;
  }
  /*
  step3 : select the candidate token with logits bigger than s_topk,
  compute the seq probability ended with them,
  save the probability, token_index, selected token number.
  */
  int idx = left_idx;
  int batch_start_pos = batch_id * beam_size * vocab_size;
  // int unk_vocab_id = vocab_size - 3; // last three element: unk, start, eos
  __shared__ int l_n;  // current iteration candidate number
  // Stream-compaction loop: each iteration each thread inspects one vocab
  // entry; the __syncthreads() after zeroing l_n also publishes the step2
  // shared values before their first read.
  for (int iter = 0; iter < (vocab_size + blockDim.x - 1) / blockDim.x;
       iter++) {
    // zero the counter
    if (threadIdx.x == 0) l_n = 0;
    __syncthreads();
    float lgt = CUDA_FLOAT_INF_NEG - 1.f;  // min s_topk is CUDA_FLOAT_INF_NEG
    int pos;
    int vocab_id = idx - block_start;
    // if ((vocab_id < vocab_size) && (vocab_id != unk_vocab_id)) {
    if (vocab_id < vocab_size) {
      int lang_mask = vocab_mask[trg_lang_id * vocab_size + vocab_id];
      if (lang_mask != 0) {
        lgt = (float)(logits[idx]) + (float)__ldg(&logit_bias[vocab_id]);
        if (lgt >= s_topk)
          // pos: relative pos inside this iteration
          pos = atomicAdd(&l_n, 1);
      }
    }
    __syncthreads();
    // leader increments the global counter
    if (threadIdx.x == 0) {
      atomicAdd(&num_cur_beam_can, l_n);
      l_n = atomicAdd(num_beam_can, l_n);
    }
    __syncthreads();
    // threads with true predicates write their elements
    if ((lgt >= s_topk)) {
      pos += l_n;  // increment local pos by global counter
      if (diverse_lambda == 0) {
        can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
                               min_log_probability + 1.f) +
                         batch_id * min_log_probability;
      } else {
        can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm,
                               min_log_probability + 1.f) +
                         blockIdx.x * min_log_probability;
      }
      can_idx[pos] = idx - batch_start_pos;
    }
    __syncthreads();
    idx += blockDim.x;
  }
  // Publish this beam's candidate count (slot 0 holds the whole-batch total).
  if (threadIdx.x == 0) {
    num_beam_can[blockIdx.x + 1] = num_cur_beam_can;
  }
}
/**
Host-side dispatcher: launches the select_beam_rough_topk_multilg kernel
instantiation whose compile-time beam width matches the runtime beam_size.
Grid = step_token_num blocks (one per alive beam), block = max_thread_per_block.
Unsupported beam sizes launch nothing, exactly as before.
*/
template <typename T>
void select_beam_rough_topk_multilg_launcher(
    const T* logits, const T* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id,
    int* can_idx, float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step, int step_token_num,
    int max_thread_per_block, cudaStream_t stream, int beam_size,
    float diverse_lambda, int end_id, int src_seq_len) {
#define LS_LAUNCH_ROUGH_TOPK_MULTILG(BS)                                    \
  select_beam_rough_topk_multilg<T, BS>                                     \
      <<<step_token_num, max_thread_per_block, 0, stream>>>(                \
          logits, logit_bias, seq_probs, seq_score, alive_seq, vocab_mask,  \
          src_token_id, can_idx, can_score, num_beam_can, vocab_size,       \
          max_step, length_norm, cur_step, diverse_lambda, end_id,          \
          src_seq_len)
  switch (beam_size) {
    case 1:  LS_LAUNCH_ROUGH_TOPK_MULTILG(1);  break;
    case 2:  LS_LAUNCH_ROUGH_TOPK_MULTILG(2);  break;
    case 4:  LS_LAUNCH_ROUGH_TOPK_MULTILG(4);  break;
    case 8:  LS_LAUNCH_ROUGH_TOPK_MULTILG(8);  break;
    case 16: LS_LAUNCH_ROUGH_TOPK_MULTILG(16); break;
    case 32: LS_LAUNCH_ROUGH_TOPK_MULTILG(32); break;
    default: break;  // no kernel instantiation for other beam sizes
  }
#undef LS_LAUNCH_ROUGH_TOPK_MULTILG
}
// Explicit instantiation for fp32 logits.
template void select_beam_rough_topk_multilg_launcher<float>(
    const float* logits, const float* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id,
    int* can_idx, float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step, int step_token_num,
    int max_thread_per_block, cudaStream_t stream, int beam_size,
    float diverse_lambda, int end_id, int src_seq_len);
// Explicit instantiation for fp16 logits.
template void select_beam_rough_topk_multilg_launcher<__half>(
    const __half* logits, const __half* logit_bias, const float* seq_probs,
    const float* seq_score, const int* alive_seq,
    const int* vocab_mask, const int* src_token_id,
    int* can_idx, float* can_score, int* num_beam_can, int vocab_size, int max_step,
    float length_norm, int cur_step, int step_token_num,
    int max_thread_per_block, cudaStream_t stream, int beam_size,
    float diverse_lambda, int end_id, int src_seq_len);
} // namespace cuda
} // namespace lightseq
|
9ab134b324dd3ceae32df337c3abb66c60f580ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: Cao Thanh Tung, Ashwin Nanjappa
Date: 05-Aug-2014
===============================================================================
Copyright (c) 2011, School of Computing, National University of Singapore.
All rights reserved.
Project homepage: http://www.comp.nus.edu.sg/~tants/gdel3d.html
If you use gDel3D and you like it or have comments on its usefulness etc., we
would love to hear from you at <tants@comp.nus.edu.sg>. You may share with us
your experience and any possibilities that we may improve the work/code.
===============================================================================
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "HostToKernel.h"
#include "KerCommon.h"
#include "KerPredicates.h"
#include "DPredWrapper.h"
#ifndef __HIPCC__
#define __launch_bounds__( x )
#endif
__constant__ DPredWrapper dPredWrapper;
#include "KerPredWrapper.h"
// Copy the host-side predicate wrapper into device __constant__ memory
// (dPredWrapper) so every kernel in this file can use it.
void setPredWrapperConstant( const DPredWrapper &hostPredWrapper )
{
    CudaSafeCall( hipMemcpyToSymbol( dPredWrapper, &hostPredWrapper, sizeof( hostPredWrapper ) ) );
}
// Locate each uninserted point in one of the sub-tetra obtained by splitting
// `tet` (starting at global index tetIdx) with the infinity point, walking the
// SplitFaces/SplitNext tables via orientation tests.
// doFast = true : fast (inexact) orientation; points hitting a degenerate
//                 (OrientZero) case are encoded for a later exact pass.
// doFast = false: exact SoS orientation; only revisits points whose slot the
//                 fast pass left marked as needing an exact check (-2).
template<bool doFast>
__forceinline__ __device__ void initPointLocation
(
int* vertTetArr,
Tet tet,
int tetIdx
)
{
    const int tetVert[5] = { tet._v[0], tet._v[1], tet._v[2], tet._v[3], dPredWrapper._infIdx };
    const Point3 pt[] = {
        dPredWrapper.getPoint( tetVert[0] ),
        dPredWrapper.getPoint( tetVert[1] ),
        dPredWrapper.getPoint( tetVert[2] ),
        dPredWrapper.getPoint( tetVert[3] ),
        dPredWrapper.getPoint( tetVert[4] )
    };
    // Iterate points (grid-wide strided loop over all input points)
    for ( int idx = getCurThreadIdx(); idx < dPredWrapper.pointNum(); idx += getThreadNum() )
    {
        if ( !doFast && vertTetArr[ idx ] != -2 ) // No exact check needed
            continue;
        if ( tet.has( idx ) || idx == dPredWrapper._infIdx ) // Already inserted
        {
            vertTetArr[ idx ] = -1;
            continue;
        }
        Point3 ptVertex = dPredWrapper.getPoint( idx );
        int face = 0;
        // Walk 4 levels of the split tree to reach the containing sub-tet.
        for ( int i = 0; i < 4; ++i )
        {
            const int *fv = SplitFaces[ face ];
            Orient ort = ( doFast )
                ? dPredWrapper.doOrient3DFast(
                    tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], idx,
                    pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex )
                : dPredWrapper.doOrient3DSoS(
                    tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], idx,
                    pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex );
            // Degenerate in the fast pass: encode so the final store below
            // yields -2, marking this point for the exact pass.
            if ( doFast && (ort == OrientZero) ) { face = -tetIdx - 2; break; }
            // Use the reverse direction 'cause the splitting point is Infty!
            face = SplitNext[ face ][ ( ort == OrientPos ) ? 1 : 0 ];
            // Compiler bug: Without this assertion, this code produces undefined result in Debug-x86.
            CudaAssert( face >= 0 );
        }
        vertTetArr[ idx ] = tetIdx + face;
    }
}
// Kernel entry point: fast (inexact) initial point-location pass.
__global__ void kerInitPointLocationFast( int* vertTetArr, Tet tet, int tetIdx )
{
    initPointLocation< true >( vertTetArr, tet, tetIdx );
}
// Kernel entry point: exact (SoS) initial point-location pass, run only for
// points the fast pass could not resolve.
__global__ void kerInitPointLocationExact( int* vertTetArr, Tet tet, int tetIdx )
{
    initPointLocation< false >( vertTetArr, tet, tetIdx );
}
// Deterministic integer scramble of k, reinterpreted as a float bit pattern.
// Used as a pseudo-random per-vertex insertion priority (InsRandom rule).
// NOTE(review): the resulting bits may decode to a negative float (or
// NaN/Inf); the caller clamps negative values to zero before use.
__forceinline__ __device__ float hash( int k )
{
    k *= 357913941;
    k ^= k << 24;
    k += ~357913941;
    k ^= k >> 31;  // arithmetic shift on signed int
    k ^= k << 31;
    return int_as_float( k );
}
// For every uninserted point: compute an insertion priority ("sphere value")
// w.r.t. its containing tetra according to insRule, store it per point, and
// let each tetra keep the maximum priority of the points it contains (via
// atomicMax) — that maximum decides which point splits the tetra next.
__global__ void
kerVoteForPoint
(
KerIntArray vertexArr,
int* vertexTetArr,
Tet* tetArr,
int* vertSphereArr,
int* tetSphereArr,
InsertionRule insRule
)
{
    // Iterate uninserted points (grid-wide strided loop)
    for ( int idx = getCurThreadIdx(); idx < vertexArr._num; idx += getThreadNum() )
    {
        //*** Compute insphere value
        const int tetIdx = vertexTetArr[ idx ];
        const Tet tet = tetArr[ tetIdx ];
        const int vert = vertexArr._arr[ idx ];

        // Fix: initialize so an unexpected InsertionRule value cannot leave
        // sval uninitialized (previously read-of-uninitialized in that case).
        float sval = 0.0f;

        switch ( insRule )
        {
        case InsCircumcenter:
            sval = dPredWrapper.inSphereDet( tet, vert );
            break;

        case InsCentroid:
            sval = dPredWrapper.distToCentroid( tet, vert );
            break;

        case InsRandom:
            sval = hash( vert );
            break;
        }

        //*** Sanitize and store sphere value
        if ( sval < 0 )
            sval = 0;

        const int ival = __float_as_int( sval );
        vertSphereArr[ idx ] = ival;

        //*** Vote
        // Cheap read first to skip most atomicMax traffic.
        if ( tetSphereArr[ tetIdx ] < ival )
            atomicMax( &tetSphereArr[ tetIdx ], ival );
    }

    return;
}
// After a round of tetra splits, relocate each still-uninserted point into
// whichever new sub-tetra now contains it, by walking the SplitFaces /
// SplitNext tables with orientation tests against the splitting vertex.
// doFast = true : fast orientation; degenerate cases are encoded as a
//                 negative tet index for the exact pass.
// doFast = false: exact SoS orientation; only revisits points the fast pass
//                 left with a negative (encoded) tet index.
template < bool doFast >
__forceinline__ __device__ void
splitPoints
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToVert,
Tet* tetArr,
char* tetInfoArr,
KerIntArray freeArr
)
{
    // Iterate uninserted points
    for ( int vertIdx = getCurThreadIdx(); vertIdx < vertexArr._num; vertIdx += getThreadNum() )
    {
        int tetIdx = vertexTetArr[ vertIdx ];
        if ( doFast && tetIdx < 0 ) continue; // This vertex is inserted.
        if ( !doFast && tetIdx >= 0 ) continue; // Exact mode, vertex already processed in fast mode
        if ( !doFast )
            tetIdx = makePositive( tetIdx ); // Exact mode, vertex needs processing
        const int splitVertIdx = tetToVert[ tetIdx ];
        if ( !doFast && splitVertIdx == vertIdx ) continue; // This vertex is the inserting one
        if ( splitVertIdx == INT_MAX ) // Tet not split, nothing to update
        {
            setTetEmptyState( tetInfoArr[ tetIdx ], false ); // 'cause this may be due to insertion control
            continue; // Vertex's tetra will not be split in this round
        }
        const int vertex = vertexArr._arr[ vertIdx ];
        const Point3 ptVertex = dPredWrapper.getPoint( vertex );
        const int splitVertex = vertexArr._arr[ splitVertIdx ];
        const Tet tet = loadTet( tetArr, tetIdx );
        // Last slot of splitVertex's free-list segment (see freeArr use below).
        const int freeIdx = ( splitVertex + 1 ) * MeanVertDegree - 1;
        const int tetVert[5] = { tet._v[0], tet._v[1], tet._v[2], tet._v[3], splitVertex };
        const Point3 pt[] = {
            dPredWrapper.getPoint( tetVert[0] ),
            dPredWrapper.getPoint( tetVert[1] ),
            dPredWrapper.getPoint( tetVert[2] ),
            dPredWrapper.getPoint( tetVert[3] ),
            dPredWrapper.getPoint( tetVert[4] )
        };
        int face = 0;
        // Walk 3 levels (splitting point is a real vertex here, not infinity,
        // hence the forward branch direction below).
        for ( int i = 0; i < 3; ++i )
        {
            const int *fv = SplitFaces[ face ];
            Orient ort = ( doFast )
                ? dPredWrapper.doOrient3DFast(
                    tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], vertex,
                    pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex )
                : dPredWrapper.doOrient3DSoS(
                    tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], vertex,
                    pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex );
            // Needs exact computation
            if ( doFast && (ort == OrientZero) ) { face = makeNegative( tetIdx ); break; }
            face = SplitNext[ face ][ ( ort == OrientPos ) ? 0 : 1 ];
        }
        if ( face >= 0 )
        {
            // Map the leaf id (offset from 7 — TODO confirm against the
            // SplitNext table) to the global index of the new tetra taken
            // from splitVertex's free-list slots, and mark it non-empty.
            face = freeArr._arr[ freeIdx - (face - 7) ];
            setTetEmptyState( tetInfoArr[ face ], false );
        }
        vertexTetArr[ vertIdx ] = face;
    }

    return;
}
// Kernel entry point: fast (inexact) point relocation after tetra splits.
__global__ void
kerSplitPointsFast
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToVert,
Tet* tetArr,
char* tetInfoArr,
KerIntArray freeArr
)
{
    splitPoints< true >( vertexArr, vertexTetArr, tetToVert,
                         tetArr, tetInfoArr, freeArr );
}
// Kernel entry point: exact (SoS) point relocation, run only for points the
// fast pass could not resolve.
__global__ void
kerSplitPointsExactSoS
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToVert,
Tet* tetArr,
char* tetInfoArr,
KerIntArray freeArr
)
{
    splitPoints< false >( vertexArr, vertexTetArr, tetToVert,
                          tetArr, tetInfoArr, freeArr );
}
// Record a 3-2 flip vote: all three participating tetra receive the same
// vote value; atomicMin keeps the smallest vote value per tetra.
__forceinline__ __device__ void
voteForFlip32( int* tetVoteArr, int voteOffset, int botTi, int topTi, int sideTi )
{
    const int vote = voteOffset + botTi;

    atomicMin( &tetVoteArr[ botTi ],  vote );
    atomicMin( &tetVoteArr[ topTi ],  vote );
    atomicMin( &tetVoteArr[ sideTi ], vote );
}
// Record a 2-3 flip vote: both participating tetra receive the same vote
// value; atomicMin keeps the smallest vote value per tetra.
__forceinline__ __device__ void
voteForFlip23( int* tetVoteArr, int voteOffset, int botTi, int topTi )
{
    const int vote = voteOffset + botTi;

    atomicMin( &tetVoteArr[ botTi ], vote );
    atomicMin( &tetVoteArr[ topTi ], vote );
}
extern __shared__ int2 s_exactCheck[];
// Cooperatively flush up to BLOCK_DIM items from the shared-memory staging
// buffer s_input (holding s_num valid items) to global `output`, reserving a
// contiguous range via atomicAdd on g_counter. Any overflow items beyond
// BLOCK_DIM are compacted to the front of the buffer and s_num is reduced.
// Must be called by ALL threads of the block (contains __syncthreads).
template< typename T >
__forceinline__ __device__ void writeShared
(
T* s_input,
int& s_offset,
int& s_num,
T* output,
int& g_counter
)
{
    int writeNum = ( s_num >= BLOCK_DIM ) ? BLOCK_DIM : s_num;

    // Leader reserves the output range for the whole block.
    if ( THREAD_IDX == 0 )
        s_offset = atomicAdd( &g_counter, writeNum );

    __syncthreads();

    if ( THREAD_IDX < writeNum )
        output[ s_offset + THREAD_IDX ] = s_input[ THREAD_IDX ];

    // Shift the remaining items (if any) to the front of the buffer.
    if ( THREAD_IDX < s_num - BLOCK_DIM )
        s_input[ THREAD_IDX ] = s_input[ BLOCK_DIM + THREAD_IDX ];

    __syncthreads();

    if ( THREAD_IDX == 0 )
        s_num -= writeNum;

    __syncthreads();
}
template < CheckDelaunayMode checkMode >
__forceinline__ __device__ void
checkDelaunayFast
(
KerIntArray actTetVec,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int2* exactCheckVi,
int* counterArr,
int voteOffset
)
{
__shared__ int s_num, s_offset;
int actTetNumRounded = actTetVec._num;
if ( SphereExactOrientSoS == checkMode )
{
if ( THREAD_IDX == 0 )
s_num = 0;
actTetNumRounded = roundUp( actTetVec._num, BLOCK_DIM );
__syncthreads();
}
// Iterate active tetra
for ( int idx = getCurThreadIdx(); idx < actTetNumRounded; idx += getThreadNum() )
{
if ( SphereExactOrientSoS != checkMode || idx < actTetVec._num )
{
voteArr[ idx ] = -1;
const int botTi = actTetVec._arr[ idx ];
if ( !isTetAlive( tetInfoArr[ botTi ] ) )
actTetVec._arr[ idx ] = -1;
else
{
////
// Quickly load four neighbors' opp verts and status
////
TetOpp botOpp = loadOpp( oppArr, botTi );
int oppVert[4];
for ( int botVi = 0; botVi < 4; ++botVi )
{
int topVert = -1;
// No neighbour at this face or face is internal (i.e. already locally Delaunay)
if ( /*-1 != botOpp._t[ botVi ] &&*/ !botOpp.isOppInternal( botVi ) )
{
const int topTi = botOpp.getOppTet( botVi );
const int topVi = botOpp.getOppVi( botVi );
topVert = tetArr[ topTi ]._v[ topVi ];
if ( ( ( topTi < botTi ) && Changed == getTetCheckState( tetInfoArr[ topTi ] ) ) )
topVert = makeNegative( topVert );
}
oppVert[ botVi ] = topVert;
}
////
// Check flipping configuration
////
int checkVi = 1;
//int skip = 0;
for ( int botVi = 0; botVi < 4; ++botVi )
{
// TODO: Figure why this skipping thing doesn't work.
// Some facets are left unchecked and unmarked with sphere failure.
// Hint: From 3-2 flippable flip becomes 2-2 unflippable.
//if ( isBitSet( skip, botVi ) ) continue;
const int topVert = oppVert[ botVi ];
if ( topVert < 0 ) continue;
//*** Check for 3-2 flip
const int* botOrdVi = TetViAsSeenFrom[ botVi ]; // Order bottom tetra as seen from apex vertex
int i = 0;
for ( ; i < 3; ++i ) // Check 3 sides of bottom-top tetra
{
const int sideVert = oppVert[ botOrdVi[ i ] ];
// More than 3 tetra around edge
if ( sideVert != topVert && sideVert != makeNegative( topVert ) ) continue;
// 3-2 flip is possible.
//setBitState( skip, botOrdVi[ i ], true );
break;
}
checkVi = (checkVi << 4) | botVi | ( i << 2 );
}
if ( checkVi != 1 ) // Anything to check?
{
////
// Do sphere check
////
const Tet botTet = loadTet(tetArr, botTi );
const Point3 botP[4] = {
dPredWrapper.getPoint( botTet._v[0] ),
dPredWrapper.getPoint( botTet._v[1] ),
dPredWrapper.getPoint( botTet._v[2] ),
dPredWrapper.getPoint( botTet._v[3] )
}; // Cache in local mem
int check23 = 1;
int exactVi = 1;
bool hasFlip = false;
// Check 2-3 flips
for ( ; checkVi > 1; checkVi >>= 4 )
{
const int botVi = ( checkVi & 3 );
int botCorOrdVi = ( checkVi >> 2 ) & 3;
const int topVert = oppVert[ botVi ];
const Point3 topP = dPredWrapper.getPoint( topVert );
const Side side = dPredWrapper.doInSphereFast( botTet, topVert, botP, topP );
if ( SideZero == side )
if ( checkMode == SphereFastOrientFast ) // Store for future exact mode
botOpp.setOppSpecial( botVi, true );
else // Pass to next kernel - exact kernel
exactVi = (exactVi << 5) | ( botVi << 1 ) | ( botCorOrdVi << 3 ) | 0;
if ( SideIn != side ) continue; // No insphere failure at this face
// We have insphere failure
botOpp.setOppSphereFail( botVi );
if ( botCorOrdVi < 3 ) // 3-2 flipping is possible
{
//*** 3-2 flip confirmed
char flipInfo = makeFlip( botVi, botCorOrdVi );
voteArr[ idx ] = makeVoteVal( botTi, flipInfo );
const int botCorVi = TetViAsSeenFrom[ botVi ][ botCorOrdVi ];
const int botOppTi = botOpp.getOppTet( botCorVi ); // Side tetra as seen from bottom and top tetra
const int topTi = botOpp.getOppTet( botVi );
voteForFlip32( tetVoteArr, voteOffset, botTi, topTi, botOppTi );
hasFlip = true;
check23 = 1; // No more need to check 2-3
break;
}
// Postpone check for 2-3 flippability
check23 = ( check23 << 2 ) | botVi;
}
//*** Try for 2-3 flip
for ( ; check23 > 1; check23 >>= 2 )
{
const int botVi = ( check23 & 3 );
const int topVert = oppVert[ botVi ];
const Point3 topP = dPredWrapper.getPoint( topVert );
const int* botOrdVi = TetViAsSeenFrom[ botVi ]; // Order bottom tetra as seen from apex vertex
hasFlip = true;
// Go around bottom-top tetra, check 3 sides
for ( int i = 0; i < 3; ++i )
{
const int* fv = TetViAsSeenFrom[ botOrdVi[i] ];
Orient ort = dPredWrapper.doOrient3DFast(
botTet._v[ fv[0] ], botTet._v[ fv[1] ], botTet._v[ fv[2] ], topVert,
botP[ fv[0] ], botP[ fv[1] ], botP[ fv[2] ], topP );
if ( OrientZero == ort )
if ( checkMode == SphereFastOrientFast )
// Store for future exact mode
botOpp.setOppSpecial( botVi, true );
else
// Pass to next kernel - exact kernel
exactVi = (exactVi << 5) | ( botVi << 1 ) | ( 3 << 3 ) | 1;
if ( OrientPos != ort )
{
hasFlip = false;
break; // Cannot do 23 flip
}
}
if ( hasFlip ) //*** 2-3 flip possible!
{
const char flipInfo = makeFlip( botVi, 3 );
voteArr[ idx ] = makeVoteVal( botTi, flipInfo );
const int topTi = botOpp.getOppTet( botVi );
voteForFlip23( tetVoteArr, voteOffset, botTi, topTi );
break;
}
} // Check faces of tetra
storeOpp( oppArr, botTi, botOpp );
if ( ( checkMode == SphereExactOrientSoS ) && ( !hasFlip ) && ( exactVi != 1 ) )
{
#if __CUDA_ARCH__ >= 120
const int checkIdx = atomicAdd( &s_num, 1 );
s_exactCheck[ checkIdx ] = make_int2( idx, exactVi );
#else
const int checkIdx = atomicAdd( &counterArr[ CounterExact ], 1 );
exactCheckVi[ checkIdx ] = make_int2( idx, exactVi );
#endif
}
}
}
}
#if __CUDA_ARCH__ >= 120
if ( SphereExactOrientSoS == checkMode )
{
__syncthreads();
// Output to global mem
if ( s_num >= BLOCK_DIM )
writeShared( s_exactCheck, s_offset, s_num,
exactCheckVi, counterArr[ CounterExact ] );
}
#endif
}
#if __CUDA_ARCH__ >= 120
if ( SphereExactOrientSoS == checkMode && s_num > 0 ) // Output to global mem
writeShared( s_exactCheck, s_offset, s_num,
exactCheckVi, counterArr[ CounterExact ] );
#endif
if ( blockIdx.x == 0 && threadIdx.x == 0 )
{
counterArr[ CounterFlip ] = 0;
}
return;
}
__global__ void
kerCheckDelaunayFast
(
KerIntArray actTetVec,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int* counterArr,
int voteOffset
)
{
checkDelaunayFast< SphereFastOrientFast >(
actTetVec,
tetArr,
oppArr,
tetInfoArr,
tetVoteArr,
voteArr,
NULL,
counterArr,
voteOffset
);
return;
}
__global__ void
kerCheckDelaunayExact_Fast
(
KerIntArray actTetVec,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int2* exactCheckVi,
int* counterArr,
int voteOffset
)
{
checkDelaunayFast< SphereExactOrientSoS >(
actTetVec,
tetArr,
oppArr,
tetInfoArr,
tetVoteArr,
voteArr,
exactCheckVi,
counterArr,
voteOffset
);
return;
}
__global__ void
__launch_bounds__( PRED_THREADS_PER_BLOCK )
kerCheckDelaunayExact_Exact
(
int* actTetArr,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int2* exactCheckVi,
int* counterArr,
int voteOffset
)
{
const int exactNum = counterArr[ CounterExact ];
// Iterate active tetra
for ( int idx = getCurThreadIdx(); idx < exactNum; idx += getThreadNum() )
{
int2 val = exactCheckVi[ idx ];
int botTi = actTetArr[ val.x ];
int exactVi = val.y;
exactCheckVi[ idx ] = make_int2( -1, -1 );
////
// Do sphere check
////
TetOpp botOpp = loadOpp( oppArr, botTi );
const Tet botTet = loadTet( tetArr, botTi );
const Point3 botP[4] = {
dPredWrapper.getPoint( botTet._v[0] ),
dPredWrapper.getPoint( botTet._v[1] ),
dPredWrapper.getPoint( botTet._v[2] ),
dPredWrapper.getPoint( botTet._v[3] )
};
// Check 2-3 flips
for ( ; exactVi > 1; exactVi >>= 5 )
{
const int botVi = ( exactVi >> 1 ) & 3;
int botCorOrdVi = ( exactVi >> 3 ) & 3;
const int topTi = botOpp.getOppTet( botVi );
const int topVi = botOpp.getOppVi( botVi );
const int topVert = tetArr[ topTi ]._v[ topVi ];
const Point3 topP = dPredWrapper.getPoint( topVert );
if ( ( exactVi & 1 ) == 0 )
{
const Side side = dPredWrapper.doInSphereSoS( botTet, topVert, botP, topP );
if ( SideIn != side ) continue; // No insphere failure at this face
}
botOpp.setOppSphereFail( botVi );
// We have insphere failure, determine kind of flip
const FlipType flipType = ( 3 == botCorOrdVi ? Flip23 : Flip32 );
//*** Try for 3-2 flip
const int* botOrdVi = TetViAsSeenFrom[ botVi ]; // Order bottom tetra as seen from apex vertex
if ( Flip32 == flipType ) // 3-2 flipping is possible
{
//*** 3-2 flip confirmed
const int botCorVi = botOrdVi[ botCorOrdVi ];
const int botOppTi = botOpp.getOppTet( botCorVi ); // Side tetra as seen from bottom and top tetra
voteForFlip32( tetVoteArr, voteOffset, botTi, topTi, botOppTi );
char flipInfo = makeFlip( botVi, botCorOrdVi );
voteArr[ val.x ] = makeVoteVal( botTi, flipInfo );
break;
}
// Try flip 2-3
bool hasFlip = true;
// Go around bottom-top tetra, check 3 sides
for ( int i = 0; i < 3; ++i )
{
const int botCorVi = botOrdVi[i];
const int* fv = TetViAsSeenFrom[ botCorVi ];
const Orient ort = dPredWrapper.doOrient3DSoS(
botTet._v[ fv[0] ], botTet._v[ fv[1] ], botTet._v[ fv[2] ], topVert,
botP[ fv[0] ], botP[ fv[1] ], botP[ fv[2] ], topP );
if ( OrientPos != ort )
{
hasFlip = false;
break; // Cannot do 23 flip
}
}
if ( hasFlip )
{
voteForFlip23( tetVoteArr, voteOffset, botTi, topTi );
const char flipInfo = makeFlip( botVi, 3 );
voteArr[ val.x ] = makeVoteVal( botTi, flipInfo );
break;
}
} // Check faces of tetra
storeOpp( oppArr, botTi, botOpp );
}
return;
}
__device__ int setNeedExact( int val )
{
return val | ( 1 << 31 );
}
__device__ int removeExactBit( int val )
{
return ( val & ~(1 << 31) );
}
__device__ bool isNeedExact( int val )
{
return ( val >> 31 ) & 1;
}
template<bool doFast>
__forceinline__ __device__ void
relocatePoints
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToFlip,
FlipItem* flipArr
)
{
// Iterate uninserted points
for ( int vertIdx = getCurThreadIdx(); vertIdx < vertexArr._num; vertIdx += getThreadNum() )
{
const int tetIdxVal = vertexTetArr[ vertIdx ];
if ( !doFast && !isNeedExact( tetIdxVal ) ) continue;
const int tetIdx = removeExactBit( tetIdxVal );
int nextIdx = ( doFast ) ? tetToFlip[ tetIdx ] : tetIdx;
if ( nextIdx == -1 )
continue;
const int vertex = vertexArr._arr[ vertIdx ];
int flag = nextIdx & 1;
int destIdx = nextIdx >> 1;
while ( flag == 1 )
{
const FlipItem flipItem = loadFlip( flipArr, destIdx );
const FlipType fType = ( flipItem._t[ 2 ] < 0 ? Flip32 : Flip23 );
int nextLocId;
int3 F;
if ( Flip23 == fType )
F = make_int3( 0, 2, 3 );
else
F = make_int3( 0, 1, 2 );
const Orient ord0 = doFast
? dPredWrapper.doOrient3DFast( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex )
: dPredWrapper.doOrient3DSoS( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex );
if ( doFast && ( OrientZero == ord0 ) )
{
destIdx = setNeedExact( nextIdx );
break;
}
if ( Flip32 == fType )
{
nextLocId = ( OrientPos == ord0 ) ? 0 : 1;
}
else
{
if ( OrientPos == ord0 )
{
nextLocId = 0;
F = make_int3( 0, 3, 1 );
}
else
{
nextLocId = 1;
F = make_int3( 0, 4, 3 );
}
//right = 2;
const Orient ord1 = doFast
? dPredWrapper.doOrient3DFast( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex )
: dPredWrapper.doOrient3DSoS( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex );
if ( doFast && ( OrientZero == ord1 ) )
{
destIdx = setNeedExact( nextIdx );
break;
}
else
nextLocId = ( OrientPos == ord1 ) ? nextLocId : 2;
}
nextIdx = flipItem._t[ nextLocId ];
flag = nextIdx & 1;
destIdx = nextIdx >> 1;
}
vertexTetArr[ vertIdx ] = destIdx; // Write back
}
return;
}
__global__ void
kerRelocatePointsFast
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToFlip,
FlipItem* flipArr
)
{
relocatePoints<true>(
vertexArr,
vertexTetArr,
tetToFlip,
flipArr
);
}
__global__ void
kerRelocatePointsExact
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToFlip,
FlipItem* flipArr
)
{
relocatePoints<false>(
vertexArr,
vertexTetArr,
tetToFlip,
flipArr
);
}
| 9ab134b324dd3ceae32df337c3abb66c60f580ee.cu | /*
Author: Cao Thanh Tung, Ashwin Nanjappa
Date: 05-Aug-2014
===============================================================================
Copyright (c) 2011, School of Computing, National University of Singapore.
All rights reserved.
Project homepage: http://www.comp.nus.edu.sg/~tants/gdel3d.html
If you use gDel3D and you like it or have comments on its usefulness etc., we
would love to hear from you at <tants@comp.nus.edu.sg>. You may share with us
your experience and any possibilities that we may improve the work/code.
===============================================================================
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "HostToKernel.h"
#include "KerCommon.h"
#include "KerPredicates.h"
#include "DPredWrapper.h"
#ifndef __CUDACC__
#define __launch_bounds__( x )
#endif
__constant__ DPredWrapper dPredWrapper;
#include "KerPredWrapper.h"
void setPredWrapperConstant( const DPredWrapper &hostPredWrapper )
{
CudaSafeCall( cudaMemcpyToSymbol( dPredWrapper, &hostPredWrapper, sizeof( hostPredWrapper ) ) );
}
template<bool doFast>
__forceinline__ __device__ void initPointLocation
(
int* vertTetArr,
Tet tet,
int tetIdx
)
{
const int tetVert[5] = { tet._v[0], tet._v[1], tet._v[2], tet._v[3], dPredWrapper._infIdx };
const Point3 pt[] = {
dPredWrapper.getPoint( tetVert[0] ),
dPredWrapper.getPoint( tetVert[1] ),
dPredWrapper.getPoint( tetVert[2] ),
dPredWrapper.getPoint( tetVert[3] ),
dPredWrapper.getPoint( tetVert[4] )
};
// Iterate points
for ( int idx = getCurThreadIdx(); idx < dPredWrapper.pointNum(); idx += getThreadNum() )
{
if ( !doFast && vertTetArr[ idx ] != -2 ) // No exact check needed
continue;
if ( tet.has( idx ) || idx == dPredWrapper._infIdx ) // Already inserted
{
vertTetArr[ idx ] = -1;
continue;
}
Point3 ptVertex = dPredWrapper.getPoint( idx );
int face = 0;
for ( int i = 0; i < 4; ++i )
{
const int *fv = SplitFaces[ face ];
Orient ort = ( doFast )
? dPredWrapper.doOrient3DFast(
tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], idx,
pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex )
: dPredWrapper.doOrient3DSoS(
tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], idx,
pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex );
if ( doFast && (ort == OrientZero) ) { face = -tetIdx - 2; break; } // Needs exact computation
// Use the reverse direction 'cause the splitting point is Infty!
face = SplitNext[ face ][ ( ort == OrientPos ) ? 1 : 0 ];
// Compiler bug: Without this assertion, this code produces undefined result in Debug-x86.
CudaAssert( face >= 0 );
}
vertTetArr[ idx ] = tetIdx + face;
}
}
__global__ void kerInitPointLocationFast
(
int* vertTetArr,
Tet tet,
int tetIdx
)
{
initPointLocation<true>( vertTetArr, tet, tetIdx );
}
__global__ void kerInitPointLocationExact
(
int* vertTetArr,
Tet tet,
int tetIdx
)
{
initPointLocation<false>( vertTetArr, tet, tetIdx );
}
__forceinline__ __device__ float hash( int k )
{
k *= 357913941;
k ^= k << 24;
k += ~357913941;
k ^= k >> 31;
k ^= k << 31;
return int_as_float( k );
}
__global__ void
kerVoteForPoint
(
KerIntArray vertexArr,
int* vertexTetArr,
Tet* tetArr,
int* vertSphereArr,
int* tetSphereArr,
InsertionRule insRule
)
{
// Iterate uninserted points
for ( int idx = getCurThreadIdx(); idx < vertexArr._num; idx += getThreadNum() )
{
//*** Compute insphere value
const int tetIdx = vertexTetArr[ idx ];
const Tet tet = tetArr[ tetIdx ];
const int vert = vertexArr._arr[ idx ];
float sval;
switch ( insRule )
{
case InsCircumcenter:
sval = dPredWrapper.inSphereDet( tet, vert );
break;
case InsCentroid:
sval = dPredWrapper.distToCentroid( tet, vert );
break;
case InsRandom:
sval = hash(vert);
break;
}
//*** Sanitize and store sphere value
if ( sval < 0 )
sval = 0;
int ival = __float_as_int(sval);
vertSphereArr[ idx ] = ival;
//*** Vote
if ( tetSphereArr[ tetIdx ] < ival ) // Helps reduce atomicMax cost!
atomicMax( &tetSphereArr[ tetIdx ], ival );
}
return;
}
template < bool doFast >
__forceinline__ __device__ void
splitPoints
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToVert,
Tet* tetArr,
char* tetInfoArr,
KerIntArray freeArr
)
{
// Iterate uninserted points
for ( int vertIdx = getCurThreadIdx(); vertIdx < vertexArr._num; vertIdx += getThreadNum() )
{
int tetIdx = vertexTetArr[ vertIdx ];
if ( doFast && tetIdx < 0 ) continue; // This vertex is inserted.
if ( !doFast && tetIdx >= 0 ) continue; // Exact mode, vertex already processed in fast mode
if ( !doFast )
tetIdx = makePositive( tetIdx ); // Exact mode, vertex needs processing
const int splitVertIdx = tetToVert[ tetIdx ];
if ( !doFast && splitVertIdx == vertIdx ) continue; // This vertex is the inserting one
if ( splitVertIdx == INT_MAX ) // Tet not split, nothing to update
{
setTetEmptyState( tetInfoArr[ tetIdx ], false ); // 'cause this may be due to insertion control
continue; // Vertex's tetra will not be split in this round
}
const int vertex = vertexArr._arr[ vertIdx ];
const Point3 ptVertex = dPredWrapper.getPoint( vertex );
const int splitVertex = vertexArr._arr[ splitVertIdx ];
const Tet tet = loadTet( tetArr, tetIdx );
const int freeIdx = ( splitVertex + 1 ) * MeanVertDegree - 1;
const int tetVert[5] = { tet._v[0], tet._v[1], tet._v[2], tet._v[3], splitVertex };
const Point3 pt[] = {
dPredWrapper.getPoint( tetVert[0] ),
dPredWrapper.getPoint( tetVert[1] ),
dPredWrapper.getPoint( tetVert[2] ),
dPredWrapper.getPoint( tetVert[3] ),
dPredWrapper.getPoint( tetVert[4] )
};
int face = 0;
for ( int i = 0; i < 3; ++i )
{
const int *fv = SplitFaces[ face ];
Orient ort = ( doFast )
? dPredWrapper.doOrient3DFast(
tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], vertex,
pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex )
: dPredWrapper.doOrient3DSoS(
tetVert[ fv[0] ], tetVert[ fv[1] ], tetVert[ fv[2] ], vertex,
pt[ fv[0] ], pt[ fv[1] ], pt[ fv[2] ], ptVertex );
// Needs exact computation
if ( doFast && (ort == OrientZero) ) { face = makeNegative( tetIdx ); break; }
face = SplitNext[ face ][ ( ort == OrientPos ) ? 0 : 1 ];
}
if ( face >= 0 )
{
face = freeArr._arr[ freeIdx - (face - 7) ];
setTetEmptyState( tetInfoArr[ face ], false );
}
vertexTetArr[ vertIdx ] = face;
}
return;
}
__global__ void
kerSplitPointsFast
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToVert,
Tet* tetArr,
char* tetInfoArr,
KerIntArray freeArr)
{
splitPoints< true >(
vertexArr,
vertexTetArr,
tetToVert,
tetArr,
tetInfoArr,
freeArr
);
}
__global__ void
kerSplitPointsExactSoS
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToVert,
Tet* tetArr,
char* tetInfoArr,
KerIntArray freeArr
)
{
splitPoints< false >(
vertexArr,
vertexTetArr,
tetToVert,
tetArr,
tetInfoArr,
freeArr
);
}
__forceinline__ __device__ void
voteForFlip32
(
int* tetVoteArr,
int voteOffset,
int botTi,
int topTi,
int sideTi
)
{
const int voteVal = voteOffset + botTi;
atomicMin( &tetVoteArr[ botTi ], voteVal );
atomicMin( &tetVoteArr[ topTi ], voteVal );
atomicMin( &tetVoteArr[ sideTi ], voteVal );
}
__forceinline__ __device__ void
voteForFlip23
(
int* tetVoteArr,
int voteOffset,
int botTi,
int topTi
)
{
const int voteVal = voteOffset + botTi;
atomicMin( &tetVoteArr[ botTi ], voteVal );
atomicMin( &tetVoteArr[ topTi ], voteVal );
}
extern __shared__ int2 s_exactCheck[];
template< typename T >
__forceinline__ __device__ void writeShared
(
T* s_input,
int& s_offset,
int& s_num,
T* output,
int& g_counter
)
{
int writeNum = ( s_num >= BLOCK_DIM ) ? BLOCK_DIM : s_num;
if ( THREAD_IDX == 0 )
s_offset = atomicAdd( &g_counter, writeNum );
__syncthreads();
if ( THREAD_IDX < writeNum )
output[ s_offset + THREAD_IDX ] = s_input[ THREAD_IDX ];
if ( THREAD_IDX < s_num - BLOCK_DIM )
s_input[ THREAD_IDX ] = s_input[ BLOCK_DIM + THREAD_IDX ];
__syncthreads();
if ( THREAD_IDX == 0 )
s_num -= writeNum;
__syncthreads();
}
template < CheckDelaunayMode checkMode >
__forceinline__ __device__ void
checkDelaunayFast
(
KerIntArray actTetVec,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int2* exactCheckVi,
int* counterArr,
int voteOffset
)
{
__shared__ int s_num, s_offset;
int actTetNumRounded = actTetVec._num;
if ( SphereExactOrientSoS == checkMode )
{
if ( THREAD_IDX == 0 )
s_num = 0;
actTetNumRounded = roundUp( actTetVec._num, BLOCK_DIM );
__syncthreads();
}
// Iterate active tetra
for ( int idx = getCurThreadIdx(); idx < actTetNumRounded; idx += getThreadNum() )
{
if ( SphereExactOrientSoS != checkMode || idx < actTetVec._num )
{
voteArr[ idx ] = -1;
const int botTi = actTetVec._arr[ idx ];
if ( !isTetAlive( tetInfoArr[ botTi ] ) )
actTetVec._arr[ idx ] = -1;
else
{
////
// Quickly load four neighbors' opp verts and status
////
TetOpp botOpp = loadOpp( oppArr, botTi );
int oppVert[4];
for ( int botVi = 0; botVi < 4; ++botVi )
{
int topVert = -1;
// No neighbour at this face or face is internal (i.e. already locally Delaunay)
if ( /*-1 != botOpp._t[ botVi ] &&*/ !botOpp.isOppInternal( botVi ) )
{
const int topTi = botOpp.getOppTet( botVi );
const int topVi = botOpp.getOppVi( botVi );
topVert = tetArr[ topTi ]._v[ topVi ];
if ( ( ( topTi < botTi ) && Changed == getTetCheckState( tetInfoArr[ topTi ] ) ) )
topVert = makeNegative( topVert );
}
oppVert[ botVi ] = topVert;
}
////
// Check flipping configuration
////
int checkVi = 1;
//int skip = 0;
for ( int botVi = 0; botVi < 4; ++botVi )
{
// TODO: Figure why this skipping thing doesn't work.
// Some facets are left unchecked and unmarked with sphere failure.
// Hint: From 3-2 flippable flip becomes 2-2 unflippable.
//if ( isBitSet( skip, botVi ) ) continue;
const int topVert = oppVert[ botVi ];
if ( topVert < 0 ) continue;
//*** Check for 3-2 flip
const int* botOrdVi = TetViAsSeenFrom[ botVi ]; // Order bottom tetra as seen from apex vertex
int i = 0;
for ( ; i < 3; ++i ) // Check 3 sides of bottom-top tetra
{
const int sideVert = oppVert[ botOrdVi[ i ] ];
// More than 3 tetra around edge
if ( sideVert != topVert && sideVert != makeNegative( topVert ) ) continue;
// 3-2 flip is possible.
//setBitState( skip, botOrdVi[ i ], true );
break;
}
checkVi = (checkVi << 4) | botVi | ( i << 2 );
}
if ( checkVi != 1 ) // Anything to check?
{
////
// Do sphere check
////
const Tet botTet = loadTet(tetArr, botTi );
const Point3 botP[4] = {
dPredWrapper.getPoint( botTet._v[0] ),
dPredWrapper.getPoint( botTet._v[1] ),
dPredWrapper.getPoint( botTet._v[2] ),
dPredWrapper.getPoint( botTet._v[3] )
}; // Cache in local mem
int check23 = 1;
int exactVi = 1;
bool hasFlip = false;
// Check 2-3 flips
for ( ; checkVi > 1; checkVi >>= 4 )
{
const int botVi = ( checkVi & 3 );
int botCorOrdVi = ( checkVi >> 2 ) & 3;
const int topVert = oppVert[ botVi ];
const Point3 topP = dPredWrapper.getPoint( topVert );
const Side side = dPredWrapper.doInSphereFast( botTet, topVert, botP, topP );
if ( SideZero == side )
if ( checkMode == SphereFastOrientFast ) // Store for future exact mode
botOpp.setOppSpecial( botVi, true );
else // Pass to next kernel - exact kernel
exactVi = (exactVi << 5) | ( botVi << 1 ) | ( botCorOrdVi << 3 ) | 0;
if ( SideIn != side ) continue; // No insphere failure at this face
// We have insphere failure
botOpp.setOppSphereFail( botVi );
if ( botCorOrdVi < 3 ) // 3-2 flipping is possible
{
//*** 3-2 flip confirmed
char flipInfo = makeFlip( botVi, botCorOrdVi );
voteArr[ idx ] = makeVoteVal( botTi, flipInfo );
const int botCorVi = TetViAsSeenFrom[ botVi ][ botCorOrdVi ];
const int botOppTi = botOpp.getOppTet( botCorVi ); // Side tetra as seen from bottom and top tetra
const int topTi = botOpp.getOppTet( botVi );
voteForFlip32( tetVoteArr, voteOffset, botTi, topTi, botOppTi );
hasFlip = true;
check23 = 1; // No more need to check 2-3
break;
}
// Postpone check for 2-3 flippability
check23 = ( check23 << 2 ) | botVi;
}
//*** Try for 2-3 flip
for ( ; check23 > 1; check23 >>= 2 )
{
const int botVi = ( check23 & 3 );
const int topVert = oppVert[ botVi ];
const Point3 topP = dPredWrapper.getPoint( topVert );
const int* botOrdVi = TetViAsSeenFrom[ botVi ]; // Order bottom tetra as seen from apex vertex
hasFlip = true;
// Go around bottom-top tetra, check 3 sides
for ( int i = 0; i < 3; ++i )
{
const int* fv = TetViAsSeenFrom[ botOrdVi[i] ];
Orient ort = dPredWrapper.doOrient3DFast(
botTet._v[ fv[0] ], botTet._v[ fv[1] ], botTet._v[ fv[2] ], topVert,
botP[ fv[0] ], botP[ fv[1] ], botP[ fv[2] ], topP );
if ( OrientZero == ort )
if ( checkMode == SphereFastOrientFast )
// Store for future exact mode
botOpp.setOppSpecial( botVi, true );
else
// Pass to next kernel - exact kernel
exactVi = (exactVi << 5) | ( botVi << 1 ) | ( 3 << 3 ) | 1;
if ( OrientPos != ort )
{
hasFlip = false;
break; // Cannot do 23 flip
}
}
if ( hasFlip ) //*** 2-3 flip possible!
{
const char flipInfo = makeFlip( botVi, 3 );
voteArr[ idx ] = makeVoteVal( botTi, flipInfo );
const int topTi = botOpp.getOppTet( botVi );
voteForFlip23( tetVoteArr, voteOffset, botTi, topTi );
break;
}
} // Check faces of tetra
storeOpp( oppArr, botTi, botOpp );
if ( ( checkMode == SphereExactOrientSoS ) && ( !hasFlip ) && ( exactVi != 1 ) )
{
#if __CUDA_ARCH__ >= 120
const int checkIdx = atomicAdd( &s_num, 1 );
s_exactCheck[ checkIdx ] = make_int2( idx, exactVi );
#else
const int checkIdx = atomicAdd( &counterArr[ CounterExact ], 1 );
exactCheckVi[ checkIdx ] = make_int2( idx, exactVi );
#endif
}
}
}
}
#if __CUDA_ARCH__ >= 120
if ( SphereExactOrientSoS == checkMode )
{
__syncthreads();
// Output to global mem
if ( s_num >= BLOCK_DIM )
writeShared( s_exactCheck, s_offset, s_num,
exactCheckVi, counterArr[ CounterExact ] );
}
#endif
}
#if __CUDA_ARCH__ >= 120
if ( SphereExactOrientSoS == checkMode && s_num > 0 ) // Output to global mem
writeShared( s_exactCheck, s_offset, s_num,
exactCheckVi, counterArr[ CounterExact ] );
#endif
if ( blockIdx.x == 0 && threadIdx.x == 0 )
{
counterArr[ CounterFlip ] = 0;
}
return;
}
__global__ void
kerCheckDelaunayFast
(
KerIntArray actTetVec,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int* counterArr,
int voteOffset
)
{
checkDelaunayFast< SphereFastOrientFast >(
actTetVec,
tetArr,
oppArr,
tetInfoArr,
tetVoteArr,
voteArr,
NULL,
counterArr,
voteOffset
);
return;
}
__global__ void
kerCheckDelaunayExact_Fast
(
KerIntArray actTetVec,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int2* exactCheckVi,
int* counterArr,
int voteOffset
)
{
checkDelaunayFast< SphereExactOrientSoS >(
actTetVec,
tetArr,
oppArr,
tetInfoArr,
tetVoteArr,
voteArr,
exactCheckVi,
counterArr,
voteOffset
);
return;
}
__global__ void
__launch_bounds__( PRED_THREADS_PER_BLOCK )
kerCheckDelaunayExact_Exact
(
int* actTetArr,
Tet* tetArr,
TetOpp* oppArr,
char* tetInfoArr,
int* tetVoteArr,
int* voteArr,
int2* exactCheckVi,
int* counterArr,
int voteOffset
)
{
const int exactNum = counterArr[ CounterExact ];
// Iterate active tetra
for ( int idx = getCurThreadIdx(); idx < exactNum; idx += getThreadNum() )
{
int2 val = exactCheckVi[ idx ];
int botTi = actTetArr[ val.x ];
int exactVi = val.y;
exactCheckVi[ idx ] = make_int2( -1, -1 );
////
// Do sphere check
////
TetOpp botOpp = loadOpp( oppArr, botTi );
const Tet botTet = loadTet( tetArr, botTi );
const Point3 botP[4] = {
dPredWrapper.getPoint( botTet._v[0] ),
dPredWrapper.getPoint( botTet._v[1] ),
dPredWrapper.getPoint( botTet._v[2] ),
dPredWrapper.getPoint( botTet._v[3] )
};
// Check 2-3 flips
for ( ; exactVi > 1; exactVi >>= 5 )
{
const int botVi = ( exactVi >> 1 ) & 3;
int botCorOrdVi = ( exactVi >> 3 ) & 3;
const int topTi = botOpp.getOppTet( botVi );
const int topVi = botOpp.getOppVi( botVi );
const int topVert = tetArr[ topTi ]._v[ topVi ];
const Point3 topP = dPredWrapper.getPoint( topVert );
if ( ( exactVi & 1 ) == 0 )
{
const Side side = dPredWrapper.doInSphereSoS( botTet, topVert, botP, topP );
if ( SideIn != side ) continue; // No insphere failure at this face
}
botOpp.setOppSphereFail( botVi );
// We have insphere failure, determine kind of flip
const FlipType flipType = ( 3 == botCorOrdVi ? Flip23 : Flip32 );
//*** Try for 3-2 flip
const int* botOrdVi = TetViAsSeenFrom[ botVi ]; // Order bottom tetra as seen from apex vertex
if ( Flip32 == flipType ) // 3-2 flipping is possible
{
//*** 3-2 flip confirmed
const int botCorVi = botOrdVi[ botCorOrdVi ];
const int botOppTi = botOpp.getOppTet( botCorVi ); // Side tetra as seen from bottom and top tetra
voteForFlip32( tetVoteArr, voteOffset, botTi, topTi, botOppTi );
char flipInfo = makeFlip( botVi, botCorOrdVi );
voteArr[ val.x ] = makeVoteVal( botTi, flipInfo );
break;
}
// Try flip 2-3
bool hasFlip = true;
// Go around bottom-top tetra, check 3 sides
for ( int i = 0; i < 3; ++i )
{
const int botCorVi = botOrdVi[i];
const int* fv = TetViAsSeenFrom[ botCorVi ];
const Orient ort = dPredWrapper.doOrient3DSoS(
botTet._v[ fv[0] ], botTet._v[ fv[1] ], botTet._v[ fv[2] ], topVert,
botP[ fv[0] ], botP[ fv[1] ], botP[ fv[2] ], topP );
if ( OrientPos != ort )
{
hasFlip = false;
break; // Cannot do 23 flip
}
}
if ( hasFlip )
{
voteForFlip23( tetVoteArr, voteOffset, botTi, topTi );
const char flipInfo = makeFlip( botVi, 3 );
voteArr[ val.x ] = makeVoteVal( botTi, flipInfo );
break;
}
} // Check faces of tetra
storeOpp( oppArr, botTi, botOpp );
}
return;
}
__device__ int setNeedExact( int val )
{
return val | ( 1 << 31 );
}
__device__ int removeExactBit( int val )
{
return ( val & ~(1 << 31) );
}
__device__ bool isNeedExact( int val )
{
return ( val >> 31 ) & 1;
}
template<bool doFast>
__forceinline__ __device__ void
relocatePoints
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToFlip,
FlipItem* flipArr
)
{
// Iterate uninserted points
for ( int vertIdx = getCurThreadIdx(); vertIdx < vertexArr._num; vertIdx += getThreadNum() )
{
const int tetIdxVal = vertexTetArr[ vertIdx ];
if ( !doFast && !isNeedExact( tetIdxVal ) ) continue;
const int tetIdx = removeExactBit( tetIdxVal );
int nextIdx = ( doFast ) ? tetToFlip[ tetIdx ] : tetIdx;
if ( nextIdx == -1 )
continue;
const int vertex = vertexArr._arr[ vertIdx ];
int flag = nextIdx & 1;
int destIdx = nextIdx >> 1;
while ( flag == 1 )
{
const FlipItem flipItem = loadFlip( flipArr, destIdx );
const FlipType fType = ( flipItem._t[ 2 ] < 0 ? Flip32 : Flip23 );
int nextLocId;
int3 F;
if ( Flip23 == fType )
F = make_int3( 0, 2, 3 );
else
F = make_int3( 0, 1, 2 );
const Orient ord0 = doFast
? dPredWrapper.doOrient3DFast( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex )
: dPredWrapper.doOrient3DSoS( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex );
if ( doFast && ( OrientZero == ord0 ) )
{
destIdx = setNeedExact( nextIdx );
break;
}
if ( Flip32 == fType )
{
nextLocId = ( OrientPos == ord0 ) ? 0 : 1;
}
else
{
if ( OrientPos == ord0 )
{
nextLocId = 0;
F = make_int3( 0, 3, 1 );
}
else
{
nextLocId = 1;
F = make_int3( 0, 4, 3 );
}
//right = 2;
const Orient ord1 = doFast
? dPredWrapper.doOrient3DFast( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex )
: dPredWrapper.doOrient3DSoS( flipItem._v[ F.x ], flipItem._v[ F.y ], flipItem._v[ F.z ], vertex );
if ( doFast && ( OrientZero == ord1 ) )
{
destIdx = setNeedExact( nextIdx );
break;
}
else
nextLocId = ( OrientPos == ord1 ) ? nextLocId : 2;
}
nextIdx = flipItem._t[ nextLocId ];
flag = nextIdx & 1;
destIdx = nextIdx >> 1;
}
vertexTetArr[ vertIdx ] = destIdx; // Write back
}
return;
}
__global__ void
kerRelocatePointsFast
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToFlip,
FlipItem* flipArr
)
{
relocatePoints<true>(
vertexArr,
vertexTetArr,
tetToFlip,
flipArr
);
}
__global__ void
kerRelocatePointsExact
(
KerIntArray vertexArr,
int* vertexTetArr,
int* tetToFlip,
FlipItem* flipArr
)
{
relocatePoints<false>(
vertexArr,
vertexTetArr,
tetToFlip,
flipArr
);
}
|
cdd96eead7e2965b91febb511a8e0a1daab82c5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void f1( float3* __restrict__ ptr ) {
float3 v = ptr[threadIdx.x];
v.x += 1;
v.y += 1;
v.z += 1;
ptr[threadIdx.x] = v;
}
__global__
void f2( float* __restrict__ ptr1, float* __restrict__ ptr2, float* __restrict__ ptr3 ) {
ptr1[threadIdx.x] += 1;
ptr2[threadIdx.x] += 1;
ptr3[threadIdx.x] += 1;
}
int main() {
float *some_ptr;
hipMalloc(&some_ptr, 96 * sizeof(float));
hipLaunchKernelGGL(( f1), dim3(1), dim3(32), 0, 0, (float3*) some_ptr);
hipLaunchKernelGGL(( f2), dim3(1), dim3(32), 0, 0, some_ptr, some_ptr+32, some_ptr+64);
}
| cdd96eead7e2965b91febb511a8e0a1daab82c5d.cu | __global__
void f1( float3* __restrict__ ptr ) {
float3 v = ptr[threadIdx.x];
v.x += 1;
v.y += 1;
v.z += 1;
ptr[threadIdx.x] = v;
}
__global__
void f2( float* __restrict__ ptr1, float* __restrict__ ptr2, float* __restrict__ ptr3 ) {
ptr1[threadIdx.x] += 1;
ptr2[threadIdx.x] += 1;
ptr3[threadIdx.x] += 1;
}
int main() {
float *some_ptr;
cudaMalloc(&some_ptr, 96 * sizeof(float));
f1<<<1, 32>>>((float3*) some_ptr);
f2<<<1, 32>>>(some_ptr, some_ptr+32, some_ptr+64);
}
|
230193c0d648685024ae9b3fcebe8289f9f57506.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_one_hot.h"
namespace anakin {
namespace saber {
template <>
SaberStatus SaberOneHot<NV, AK_FLOAT>::create(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
OneHotParam<NV>& param, Context<NV>& ctx) {
return SaberSuccess;
}
template <>
SaberStatus SaberOneHot<NV, AK_FLOAT>::init(
const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
OneHotParam<NV>& param, Context<NV>& ctx) {
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
__global__ void fill_one_hot_kernel(const float* in_ptr,
float* out_ptr, const int dim, const int depth) {
CUDA_KERNEL_LOOP(tid, dim) {
out_ptr[tid * depth + (int)in_ptr[tid]] = 1.0;
}
}
// Compute the one-hot encoding on the GPU:
//   1. zero the whole output tensor asynchronously on the compute stream;
//   2. launch fill_one_hot_kernel to scatter a 1 at each element's class
//      index.
// Both operations are enqueued on the same stream, so they execute in order.
template <>
SaberStatus SaberOneHot<NV, AK_FLOAT>::dispatch(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    OneHotParam<NV>& param) {
    auto stream = _ctx->get_compute_stream();
    const float* input_ptr = (const float*)inputs[0]->data();
    float* output_ptr = (float*)outputs[0]->mutable_data();
    // Number of classes per one-hot row.
    int _depth = param.depth;
    // Number of input elements (= number of one-hot rows to produce).
    int dims = inputs[0]->valid_size();
    hipMemsetAsync(output_ptr,
                   0,
                   outputs[0]->valid_size() * outputs[0]->get_dtype_size(),
                   stream);
    hipLaunchKernelGGL(( fill_one_hot_kernel), dim3(CUDA_GET_BLOCKS(dims)), dim3(CUDA_NUM_THREADS), 0, stream,
            input_ptr, output_ptr, dims, _depth);
    return SaberSuccess;
}
template class SaberOneHot<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberOneHot, OneHotParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberOneHot, OneHotParam, NV, AK_INT8);
}
} | 230193c0d648685024ae9b3fcebe8289f9f57506.cu |
#include "saber/funcs/impl/cuda/saber_one_hot.h"
namespace anakin {
namespace saber {
// One-hot encoding needs no weights or persistent workspace, so create() is
// a no-op that always reports success.
template <>
SaberStatus SaberOneHot<NV, AK_FLOAT>::create(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    OneHotParam<NV>& param, Context<NV>& ctx) {
    return SaberSuccess;
}
// Remember the execution context (needed by dispatch() for its stream) and
// delegate any remaining setup to create().
template <>
SaberStatus SaberOneHot<NV, AK_FLOAT>::init(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    OneHotParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    return create(inputs, outputs, param, ctx);
}
// Scatter kernel for one-hot encoding: for each of the `dim` input elements,
// set out_ptr[tid * depth + in_ptr[tid]] = 1. The output buffer is expected
// to be zero-filled by the caller (see dispatch()).
//   in_ptr : class indices stored as floats, length `dim`
//   out_ptr: one-hot rows, length `dim * depth`
// Labels outside [0, depth) are now skipped instead of writing out of bounds.
__global__ void fill_one_hot_kernel(const float* in_ptr,
        float* out_ptr, const int dim, const int depth) {
    CUDA_KERNEL_LOOP(tid, dim) {
        const int label = static_cast<int>(in_ptr[tid]);
        // Guard against malformed labels; the unchecked index previously
        // corrupted memory past the end of the output tensor.
        if (label >= 0 && label < depth) {
            out_ptr[tid * depth + label] = 1.0f;
        }
    }
}
// Compute the one-hot encoding on the GPU:
//   1. zero the whole output tensor asynchronously on the compute stream;
//   2. launch fill_one_hot_kernel to scatter a 1 at each element's class
//      index.
// Both operations are enqueued on the same stream, so they execute in order.
template <>
SaberStatus SaberOneHot<NV, AK_FLOAT>::dispatch(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    OneHotParam<NV>& param) {
    auto stream = _ctx->get_compute_stream();
    const float* input_ptr = (const float*)inputs[0]->data();
    float* output_ptr = (float*)outputs[0]->mutable_data();
    // Number of classes per one-hot row.
    int _depth = param.depth;
    // Number of input elements (= number of one-hot rows to produce).
    int dims = inputs[0]->valid_size();
    cudaMemsetAsync(output_ptr,
                    0,
                    outputs[0]->valid_size() * outputs[0]->get_dtype_size(),
                    stream);
    fill_one_hot_kernel<<<CUDA_GET_BLOCKS(dims), CUDA_NUM_THREADS, 0, stream>>>(
            input_ptr, output_ptr, dims, _depth);
    return SaberSuccess;
}
template class SaberOneHot<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberOneHot, OneHotParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberOneHot, OneHotParam, NV, AK_INT8);
}
} |
ee8a10abc86a25fbc27a6b324bc78e5696b3ef02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <math.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <tuple>
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
namespace {
// A structure for holding details about a pixel.
struct Pixel {
  float z; // interpolated depth of the face at this pixel
  int64_t idx; // idx of face
  float dist; // abs distance of pixel to face
  float3 bary; // barycentric coordinates of the pixel w.r.t. the face
};
// Order pixels front-to-back by depth, breaking depth ties by face index so
// the ordering is deterministic.
__device__ bool operator<(const Pixel& a, const Pixel& b) {
  if (a.z != b.z) {
    return a.z < b.z;
  }
  return a.idx < b.idx;
}
// Get the xyz coordinates of the three vertices for the face given by the
// index face_idx into face_verts.
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
    const float* face_verts,
    int face_idx) {
  // Each face stores its three vertices as 9 consecutive floats:
  // (x0, y0, z0, x1, y1, z1, x2, y2, z2).
  const float* base = face_verts + face_idx * 9;
  const float3 vert0 = make_float3(base[0], base[1], base[2]);
  const float3 vert1 = make_float3(base[3], base[4], base[5]);
  const float3 vert2 = make_float3(base[6], base[7], base[8]);
  return thrust::make_tuple(vert0, vert1, vert2);
}
// Get the min/max x/y/z values for the face given by vertices v0, v1, v2.
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
  // Componentwise min/max over the three vertices gives the axis-aligned
  // bounding box; each returned float2 is (min, max) for one axis.
  const float2 xlims =
      make_float2(FloatMin3(v0.x, v1.x, v2.x), FloatMax3(v0.x, v1.x, v2.x));
  const float2 ylims =
      make_float2(FloatMin3(v0.y, v1.y, v2.y), FloatMax3(v0.y, v1.y, v2.y));
  const float2 zlims =
      make_float2(FloatMin3(v0.z, v1.z, v2.z), FloatMax3(v0.z, v1.z, v2.z));
  return thrust::make_tuple(xlims, ylims, zlims);
}
// Check if the point (px, py) lies outside the face bounding box face_bbox.
// Return true if the point is outside.
__device__ bool CheckPointOutsideBoundingBox(
    float3 v0,
    float3 v1,
    float3 v2,
    float blur_radius,
    float2 pxy) {
  const auto bbox = GetFaceBoundingBox(v0, v1, v2);
  const float2 xlims = thrust::get<0>(bbox);
  const float2 ylims = thrust::get<1>(bbox);
  const float2 zlims = thrust::get<2>(bbox);
  // Faces with at least one vertex behind the camera won't render correctly
  // and should be removed or clipped before calling the rasterizer.
  if (zlims.x < kEpsilon) {
    return true;
  }
  // Test the point against the xy bounding box inflated by the blur radius.
  // Strict comparisons mirror the original expression's semantics.
  const bool outside_x =
      pxy.x > xlims.y + blur_radius || pxy.x < xlims.x - blur_radius;
  const bool outside_y =
      pxy.y > ylims.y + blur_radius || pxy.y < ylims.x - blur_radius;
  return outside_x || outside_y;
}
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-orderered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
// Tests whether pixel pxy is inside (or within blur_radius of) face face_idx
// and, if so, inserts/updates the per-pixel top-K candidate list q — together
// with its bookkeeping q_size / q_max_z / q_max_idx — in place.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
    const float* face_verts, // (F, 3, 3)
    const int64_t* clipped_faces_neighbor_idx, // (F,)
    const int face_idx,
    int& q_size,
    float& q_max_z,
    int& q_max_idx,
    FaceQ& q,
    const float blur_radius,
    const float2 pxy, // Coordinates of the pixel
    const int K,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
  const float3 v0 = thrust::get<0>(v012);
  const float3 v1 = thrust::get<1>(v012);
  const float3 v2 = thrust::get<2>(v012);
  // Only need xy for barycentric coordinates and distance calculations.
  const float2 v0xy = make_float2(v0.x, v0.y);
  const float2 v1xy = make_float2(v1.x, v1.y);
  const float2 v2xy = make_float2(v2.x, v2.y);
  // Perform checks and skip if:
  // 1. the face is behind the camera
  // 2. the face is facing away from the camera
  // 3. the face has very small face area
  // 4. the pixel is outside the face bbox
  const float zmax = FloatMax3(v0.z, v1.z, v2.z);
  const bool outside_bbox = CheckPointOutsideBoundingBox(
      v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
  const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
  // Check if the face is visible to the camera.
  const bool back_face = face_area < 0.0;
  const bool zero_face_area =
      (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
  // NOTE: && binds tighter than ||, so back faces are skipped only when
  // cull_backfaces is set.
  if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
      zero_face_area) {
    return;
  }
  // Calculate barycentric coords and euclidean dist to triangle.
  const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
  const float3 p_bary = !perspective_correct
      ? p_bary0
      : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
  const float3 p_bary_clip =
      !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);
  // Depth of the face at this pixel, interpolated with the (possibly
  // clipped) barycentric weights.
  const float pz =
      p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;
  if (pz < 0) {
    return; // Face is behind the image plane.
  }
  // Get abs squared distance
  const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);
  // Use the unclipped bary coordinates to determine if the point is inside the
  // face.
  const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
  const float signed_dist = inside ? -dist : dist;
  // Check if pixel is outside blur region
  if (!inside && dist >= blur_radius) {
    return;
  }
  // Handle the case where a face (f) partially behind the image plane is
  // clipped to a quadrilateral and then split into two faces (t1, t2). In this
  // case we:
  // 1. Find the index of the neighboring face (e.g. for t1 need index of t2)
  // 2. Check if the neighboring face (t2) is already in the top K faces
  // 3. If yes, compare the distance of the pixel to t1 with the distance to t2.
  // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces.
  const int neighbor_idx = clipped_faces_neighbor_idx[face_idx];
  int neighbor_idx_top_k = -1;
  // Check if neighboring face is already in the top K.
  // -1 is the fill value in clipped_faces_neighbor_idx
  if (neighbor_idx != -1) {
    // Only need to loop until q_size.
    for (int i = 0; i < q_size; i++) {
      if (q[i].idx == neighbor_idx) {
        neighbor_idx_top_k = i;
        break;
      }
    }
  }
  // If neighbor idx is not -1 then it is in the top K struct.
  if (neighbor_idx_top_k != -1) {
    // If dist of current face is less than neighbor then overwrite the
    // neighbor face values in the top K struct.
    float neighbor_dist = abs(q[neighbor_idx_top_k].dist);
    if (dist < neighbor_dist) {
      // Overwrite the neighbor face values
      q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip};
      // If pz > q_max then overwrite the max values and index of the max.
      // q_size stays the same.
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = neighbor_idx_top_k;
      }
    }
  } else {
    // Handle as a normal face
    if (q_size < K) {
      // Just insert it.
      q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = q_size;
      }
      q_size++;
    } else if (pz < q_max_z) {
      // Overwrite the old max, and find the new max.
      q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
      q_max_z = pz;
      for (int i = 0; i < K; i++) {
        if (q[i].z > q_max_z) {
          q_max_z = q[i].z;
          q_max_idx = i;
        }
      }
    }
  }
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
// Naive rasterization kernel: one thread per output pixel (grid-stride over
// N * H * W), each thread scanning every face of its mesh and keeping the K
// nearest candidates. Output buffers face_idxs/zbuf/pix_dists are (N, H, W,
// K) and bary is (N, H, W, K, 3); the host pre-fills them with -1, so only
// the q_size found faces per pixel are written here.
__global__ void RasterizeMeshesNaiveCudaKernel(
    const float* face_verts,
    const int64_t* mesh_to_face_first_idx,
    const int64_t* num_faces_per_mesh,
    const int64_t* clipped_faces_neighbor_idx,
    const float blur_radius,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs,
    float* zbuf,
    float* pix_dists,
    float* bary) {
  // Simple version: One thread per output pixel
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  for (int i = tid; i < N * H * W; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (H * W); // batch index.
    const int pix_idx = i % (H * W);
    // Reverse ordering of X and Y axes
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    // screen coordinates to ndc coordinates of pixel.
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
    // this should be fast enough for our purposes.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    // Using the batch index of the thread get the start and stop
    // indices for the faces.
    const int64_t face_start_idx = mesh_to_face_first_idx[n];
    const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];
    // Loop through the faces in the mesh.
    for (int f = face_start_idx; f < face_stop_idx; ++f) {
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          clipped_faces_neighbor_idx,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    int idx = n * H * W * K + pix_idx * K;
    for (int k = 0; k < q_size; ++k) {
      face_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist;
      bary[(idx + k) * 3 + 0] = q[k].bary.x;
      bary[(idx + k) * 3 + 1] = q[k].bary.y;
      bary[(idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host entry point for naive rasterization. Validates shapes and devices,
// allocates the four outputs pre-filled with -1 (face_idxs/zbuf/pix_dists:
// (N, H, W, K); bary: (N, H, W, K, 3)), and launches
// RasterizeMeshesNaiveCudaKernel on the current stream.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_faces_packed_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const at::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int num_closest,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(
      num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
      "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx");
  TORCH_CHECK(
      clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
      "clipped_faces_neighbor_idx must have save size first dimension as face_verts");
  // The per-pixel candidate buffer in the kernel is statically sized.
  if (num_closest > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_faces_packed_first_idx_t{
          mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3},
      clipped_faces_neighbor_idx_t{
          clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4};
  at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
  at::checkAllSameGPU(
      c,
      {face_verts_t,
       mesh_to_faces_packed_first_idx_t,
       num_faces_per_mesh_t,
       clipped_faces_neighbor_idx_t});
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  const int N = num_faces_per_mesh.size(0); // batch size.
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int K = num_closest;
  auto long_opts = num_faces_per_mesh.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  // -1 is the "no face" fill value; the kernel only overwrites found faces.
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream,
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
      blur_radius,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
// Backward kernel: one thread per pixel (grid-stride over N * H * W); for
// each of the K rasterized faces at that pixel, chains the upstream zbuf /
// barycentric / distance gradients back to the 9 vertex coordinates of the
// face. Accumulation into grad_face_verts uses atomicAdd, so the summation
// order is nondeterministic (the host wrapper alerts on this).
__global__ void RasterizeMeshesBackwardCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int64_t* pix_to_face, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const int N,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_bary, // (N, H, W, K, 3)
    const float* grad_dists, // (N, H, W, K)
    float* grad_face_verts) { // (F, 3, 3)
  // Parallelize over each pixel in images of
  // size H * W, for each image in the batch of size N.
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
    // Convert linear index to 3D index
    const int n = t_i / (H * W); // batch index.
    const int pix_idx = t_i % (H * W);
    // Reverse ordering of X and Y axes.
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // Loop over all the faces for this pixel.
    for (int k = 0; k < K; k++) {
      // Index into (N, H, W, K, :) grad tensors
      // pixel index + top k index
      int i = n * H * W * K + pix_idx * K + k;
      const int f = pix_to_face[i];
      if (f < 0) {
        continue; // padded face.
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);
      // Only need xy for barycentric coordinate and distance calculations.
      const float2 v0xy = make_float2(v0.x, v0.y);
      const float2 v1xy = make_float2(v1.x, v1.y);
      const float2 v2xy = make_float2(v2.x, v2.y);
      // Get upstream gradients for the face.
      const float grad_dist_upstream = grad_dists[i];
      const float grad_zbuf_upstream = grad_zbuf[i];
      const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
      const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
      const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
      const float3 grad_bary_upstream = make_float3(
          grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);
      // Recompute the forward barycentric pipeline to get the values the
      // backward functions need.
      const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
      const float3 b_pp = !perspective_correct
          ? b_w
          : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);
      const float3 b_w_clip =
          !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);
      const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
      const float sign = inside ? -1.0f : 1.0f;
      auto grad_dist_f = PointTriangleDistanceBackward(
          pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
      const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
      const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
      const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);
      // Upstream gradient for barycentric coords from zbuf calculation:
      // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
      // Therefore
      // d_zbuf/d_bary_w0 = z0
      // d_zbuf/d_bary_w1 = z1
      // d_zbuf/d_bary_w2 = z2
      const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);
      // Total upstream barycentric gradients are the sum of
      // external upstream gradients and contribution from zbuf.
      const float3 grad_bary_f_sum =
          (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);
      float3 grad_bary0 = grad_bary_f_sum;
      if (clip_barycentric_coords) {
        grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
      }
      float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
      if (perspective_correct) {
        auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
            b_w, v0.z, v1.z, v2.z, grad_bary0);
        grad_bary0 = thrust::get<0>(perspective_grads);
        dz0_persp = thrust::get<1>(perspective_grads);
        dz1_persp = thrust::get<2>(perspective_grads);
        dz2_persp = thrust::get<3>(perspective_grads);
      }
      auto grad_bary_f =
          BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
      const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
      const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
      const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);
      // Accumulate per-vertex (x, y, z) gradients; layout matches
      // face_verts: 9 floats per face.
      atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
      atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
      atomicAdd(
          grad_face_verts + f * 9 + 2,
          grad_zbuf_upstream * b_w_clip.x + dz0_persp);
      atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
      atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
      atomicAdd(
          grad_face_verts + f * 9 + 5,
          grad_zbuf_upstream * b_w_clip.y + dz1_persp);
      atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
      atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
      atomicAdd(
          grad_face_verts + f * 9 + 8,
          grad_zbuf_upstream * b_w_clip.z + dz2_persp);
    }
  }
}
// Host entry point for the rasterization backward pass. Validates devices
// and dtypes, allocates a zeroed (F, 3, 3) gradient tensor, and launches
// RasterizeMeshesBackwardCudaKernel. Nondeterministic due to atomicAdd
// accumulation (alertNotDeterministic below).
at::Tensor RasterizeMeshesBackwardCuda(
    const at::Tensor& face_verts, // (F, 3, 3)
    const at::Tensor& pix_to_face, // (N, H, W, K)
    const at::Tensor& grad_zbuf, // (N, H, W, K)
    const at::Tensor& grad_bary, // (N, H, W, K, 3)
    const at::Tensor& grad_dists, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords) {
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      pix_to_face_t{pix_to_face, "pix_to_face", 2},
      grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
      grad_bary_t{grad_bary, "grad_bary", 4},
      grad_dists_t{grad_dists, "grad_dists", 5};
  at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  at::checkAllSameType(
      c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  // This is nondeterministic because atomicAdd
  at::globalContext().alertNotDeterministic("RasterizeMeshesBackwardCuda");
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  const int F = face_verts.size(0);
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);
  at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());
  if (grad_face_verts.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return grad_face_verts;
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream,
      face_verts.contiguous().data_ptr<float>(),
      pix_to_face.contiguous().data_ptr<int64_t>(),
      perspective_correct,
      clip_barycentric_coords,
      N,
      H,
      W,
      K,
      grad_zbuf.contiguous().data_ptr<float>(),
      grad_bary.contiguous().data_ptr<float>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_face_verts.data_ptr<float>());
  AT_CUDA_CHECK(hipGetLastError());
  return grad_face_verts;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
// Fine rasterization kernel: like the naive kernel, but each pixel only
// tests the (at most M) faces that the coarse pass binned into its
// bin_size x bin_size tile. One thread per (possibly padded) pixel,
// grid-stride over N * BH * BW * bin_size^2.
__global__ void RasterizeMeshesFineCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int32_t* bin_faces, // (N, BH, BW, T)
    const int64_t* clipped_faces_neighbor_idx, // (F,)
    const float blur_radius,
    const int bin_size,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int BH,
    const int BW,
    const int M,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary // (N, H, W, K, 3)
) {
  // This can be more than H * W if H or W are not divisible by bin_size.
  int num_pixels = N * BH * BW * bin_size * bin_size;
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from faces and bin_faces.
    int i = pid;
    const int n = i / (BH * BW * bin_size * bin_size);
    i %= BH * BW * bin_size * bin_size;
    // bin index y
    const int by = i / (BW * bin_size * bin_size);
    i %= BW * bin_size * bin_size;
    // bin index x
    const int bx = i / (bin_size * bin_size);
    // pixel within the bin
    i %= bin_size * bin_size;
    // Pixel x, y indices
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;
    // Padded pixels past the image edge (when bin_size doesn't divide H/W)
    // do no work.
    if (yi >= H || xi >= W)
      continue;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // This part looks like the naive rasterization kernel, except we use
    // bin_faces to only look at a subset of faces already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; m++) {
      const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
      if (f < 0) {
        continue; // bin_faces uses -1 as a sentinel value.
      }
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          clipped_faces_neighbor_idx,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // Now we've looked at all the faces for this bin, so we can write
    // output for the current pixel.
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // Reverse ordering of the X and Y axis so that
    // in the image +Y is pointing up and +X is pointing left.
    const int yidx = H - 1 - yi;
    const int xidx = W - 1 - xi;
    const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
    for (int k = 0; k < q_size; k++) {
      face_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist;
      bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
      bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
      bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host entry point for the fine rasterization pass. Validates shapes and
// devices, allocates the four outputs pre-filled with -1
// (face_idxs/zbuf/pix_dists: (N, H, W, K); bary: (N, H, W, K, 3)), and
// launches RasterizeMeshesFineCudaKernel over the coarse-pass bins.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
    const at::Tensor& face_verts,
    const at::Tensor& bin_faces,
    const at::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");
  TORCH_CHECK(
      clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
      "clipped_faces_neighbor_idx must have the same first dimension as face_verts");
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      bin_faces_t{bin_faces, "bin_faces", 2},
      clipped_faces_neighbor_idx_t{
          clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3};
  at::CheckedFrom c = "RasterizeMeshesFineCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, bin_faces_t, clipped_faces_neighbor_idx_t});
  // Set the device for the kernel launch based on the device of the input
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // bin_faces shape (N, BH, BW, M)
  const int N = bin_faces.size(0);
  const int BH = bin_faces.size(1);
  const int BW = bin_faces.size(2);
  const int M = bin_faces.size(3);
  const int K = faces_per_pixel;
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  if (K > kMaxPointsPerPixel) {
    // Report the real limit instead of a hard-coded "150" so the message
    // stays correct if kMaxPointsPerPixel changes (matches the naive path).
    std::stringstream ss;
    ss << "Must have faces_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  auto long_opts = bin_faces.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  // -1 is the "no face" fill value; the kernel only overwrites found faces.
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, stream,
      face_verts.contiguous().data_ptr<float>(),
      bin_faces.contiguous().data_ptr<int32_t>(),
      clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
      blur_radius,
      bin_size,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      BH,
      BW,
      M,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  // Surface launch errors; the naive and backward entry points already do
  // this but it was missing here.
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
| ee8a10abc86a25fbc27a6b324bc78e5696b3ef02.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <math.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <tuple>
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
namespace {
// A structure for holding details about a pixel.
struct Pixel {
  float z; // interpolated depth of the face at this pixel
  int64_t idx; // idx of face
  float dist; // abs distance of pixel to face
  float3 bary; // barycentric coordinates of the pixel w.r.t. the face
};
// Order pixels front-to-back by depth, breaking depth ties by face index so
// the ordering is deterministic.
__device__ bool operator<(const Pixel& a, const Pixel& b) {
  if (a.z != b.z) {
    return a.z < b.z;
  }
  return a.idx < b.idx;
}
// Get the xyz coordinates of the three vertices for the face given by the
// index face_idx into face_verts.
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
    const float* face_verts,
    int face_idx) {
  // Each face stores its three vertices as 9 consecutive floats:
  // (x0, y0, z0, x1, y1, z1, x2, y2, z2).
  const float* base = face_verts + face_idx * 9;
  const float3 vert0 = make_float3(base[0], base[1], base[2]);
  const float3 vert1 = make_float3(base[3], base[4], base[5]);
  const float3 vert2 = make_float3(base[6], base[7], base[8]);
  return thrust::make_tuple(vert0, vert1, vert2);
}
// Get the min/max x/y/z values for the face given by vertices v0, v1, v2.
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
  // Componentwise min/max over the three vertices gives the axis-aligned
  // bounding box; each returned float2 is (min, max) for one axis.
  const float2 xlims =
      make_float2(FloatMin3(v0.x, v1.x, v2.x), FloatMax3(v0.x, v1.x, v2.x));
  const float2 ylims =
      make_float2(FloatMin3(v0.y, v1.y, v2.y), FloatMax3(v0.y, v1.y, v2.y));
  const float2 zlims =
      make_float2(FloatMin3(v0.z, v1.z, v2.z), FloatMax3(v0.z, v1.z, v2.z));
  return thrust::make_tuple(xlims, ylims, zlims);
}
// Check if the point (px, py) lies outside the face bounding box face_bbox.
// Return true if the point is outside.
__device__ bool CheckPointOutsideBoundingBox(
    float3 v0,
    float3 v1,
    float3 v2,
    float blur_radius,
    float2 pxy) {
  const auto bbox = GetFaceBoundingBox(v0, v1, v2);
  const float2 xlims = thrust::get<0>(bbox);
  const float2 ylims = thrust::get<1>(bbox);
  const float2 zlims = thrust::get<2>(bbox);
  // Faces with at least one vertex behind the camera won't render correctly
  // and should be removed or clipped before calling the rasterizer.
  if (zlims.x < kEpsilon) {
    return true;
  }
  // Test the point against the xy bounding box inflated by the blur radius.
  // Strict comparisons mirror the original expression's semantics.
  const bool outside_x =
      pxy.x > xlims.y + blur_radius || pxy.x < xlims.x - blur_radius;
  const bool outside_y =
      pxy.y > ylims.y + blur_radius || pxy.y < ylims.x - blur_radius;
  return outside_x || outside_y;
}
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-orderered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
    const float* face_verts, // (F, 3, 3)
    const int64_t* clipped_faces_neighbor_idx, // (F,)
    const int face_idx,
    int& q_size, // in/out: number of valid entries in q
    float& q_max_z, // in/out: largest z among entries in q
    int& q_max_idx, // in/out: position in q of the largest-z entry
    FaceQ& q, // in/out: unsorted top-K array of Pixel entries
    const float blur_radius,
    const float2 pxy, // Coordinates of the pixel
    const int K, // capacity of q actually in use (K <= kMaxPointsPerPixel)
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
  const float3 v0 = thrust::get<0>(v012);
  const float3 v1 = thrust::get<1>(v012);
  const float3 v2 = thrust::get<2>(v012);
  // Only need xy for barycentric coordinates and distance calculations.
  const float2 v0xy = make_float2(v0.x, v0.y);
  const float2 v1xy = make_float2(v1.x, v1.y);
  const float2 v2xy = make_float2(v2.x, v2.y);
  // Perform checks and skip if:
  // 1. the face is behind the camera
  // 2. the face is facing away from the camera
  // 3. the face has very small face area
  // 4. the pixel is outside the face bbox
  const float zmax = FloatMax3(v0.z, v1.z, v2.z);
  const bool outside_bbox = CheckPointOutsideBoundingBox(
      v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
  const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
  // Check if the face is visible to the camera.
  const bool back_face = face_area < 0.0;
  const bool zero_face_area =
      (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
  // NOTE: relies on && binding tighter than ||, i.e. this reads as
  // zmax < 0 || (cull_backfaces && back_face) || outside_bbox || zero_face_area.
  if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
      zero_face_area) {
    return;
  }
  // Calculate barycentric coords and euclidean dist to triangle.
  const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
  const float3 p_bary = !perspective_correct
      ? p_bary0
      : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
  const float3 p_bary_clip =
      !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);
  // Interpolated depth of the pixel on the face plane.
  const float pz =
      p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;
  if (pz < 0) {
    return; // Face is behind the image plane.
  }
  // Get abs squared distance
  const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);
  // Use the unclipped bary coordinates to determine if the point is inside the
  // face.
  const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
  // Negative distance convention marks pixels strictly inside the face.
  const float signed_dist = inside ? -dist : dist;
  // Check if pixel is outside blur region
  if (!inside && dist >= blur_radius) {
    return;
  }
  // Handle the case where a face (f) partially behind the image plane is
  // clipped to a quadrilateral and then split into two faces (t1, t2). In this
  // case we:
  // 1. Find the index of the neighboring face (e.g. for t1 need index of t2)
  // 2. Check if the neighboring face (t2) is already in the top K faces
  // 3. If yes, compare the distance of the pixel to t1 with the distance to t2.
  // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces.
  const int neighbor_idx = clipped_faces_neighbor_idx[face_idx];
  int neighbor_idx_top_k = -1;
  // Check if neighboring face is already in the top K.
  // -1 is the fill value in clipped_faces_neighbor_idx
  if (neighbor_idx != -1) {
    // Only need to loop until q_size.
    for (int i = 0; i < q_size; i++) {
      if (q[i].idx == neighbor_idx) {
        neighbor_idx_top_k = i;
        break;
      }
    }
  }
  // If neighbor idx is not -1 then it is in the top K struct.
  if (neighbor_idx_top_k != -1) {
    // If dist of current face is less than neighbor then overwrite the
    // neighbor face values in the top K struct.
    float neighbor_dist = abs(q[neighbor_idx_top_k].dist);
    if (dist < neighbor_dist) {
      // Overwrite the neighbor face values
      q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip};
      // If pz > q_max then overwrite the max values and index of the max.
      // q_size stays the same.
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = neighbor_idx_top_k;
      }
    }
  } else {
    // Handle as a normal face
    if (q_size < K) {
      // Just insert it.
      q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = q_size;
      }
      q_size++;
    } else if (pz < q_max_z) {
      // Overwrite the old max, and find the new max.
      q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
      q_max_z = pz;
      // Rescan all K entries to recover the new maximum.
      for (int i = 0; i < K; i++) {
        if (q[i].z > q_max_z) {
          q_max_z = q[i].z;
          q_max_idx = i;
        }
      }
    }
  }
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
// Naive rasterization kernel: a grid-stride loop assigns one thread per
// output pixel (N * H * W total); each thread tests every face of its mesh
// against the pixel and keeps the K nearest hits.
// Outputs (indexed as (N, H, W, K); bary has a trailing dim of 3):
//   face_idxs - indices of the K nearest faces (-1 padding from the caller)
//   zbuf      - interpolated depth of the pixel on each face
//   pix_dists - signed squared xy-distance to the face (negative if inside)
//   bary      - (possibly clipped) barycentric coordinates of the pixel
__global__ void RasterizeMeshesNaiveCudaKernel(
    const float* face_verts,
    const int64_t* mesh_to_face_first_idx,
    const int64_t* num_faces_per_mesh,
    const int64_t* clipped_faces_neighbor_idx,
    const float blur_radius,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs,
    float* zbuf,
    float* pix_dists,
    float* bary) {
  // Simple version: One thread per output pixel
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  for (int i = tid; i < N * H * W; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (H * W); // batch index.
    const int pix_idx = i % (H * W);
    // Reverse ordering of X and Y axes
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    // screen coordinates to ndc coordinates of pixel.
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
    // this should be fast enough for our purposes.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    // Using the batch index of the thread get the start and stop
    // indices for the faces.
    const int64_t face_start_idx = mesh_to_face_first_idx[n];
    const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];
    // Loop through the faces in the mesh.
    for (int f = face_start_idx; f < face_stop_idx; ++f) {
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          clipped_faces_neighbor_idx,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // Write the q_size nearest faces (sorted by increasing z) to the output.
    int idx = n * H * W * K + pix_idx * K;
    for (int k = 0; k < q_size; ++k) {
      face_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist;
      bary[(idx + k) * 3 + 0] = q[k].bary.x;
      bary[(idx + k) * 3 + 1] = q[k].bary.y;
      bary[(idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host entry point for naive mesh rasterization.
// Validates inputs, allocates the four output tensors (filled with -1 so
// pixels covered by fewer than K faces are padded), and launches
// RasterizeMeshesNaiveCudaKernel on the current CUDA stream.
// Returns (face_idxs, zbuf, bary, pix_dists), each shaped (N, H, W, K)
// with bary carrying a trailing dimension of 3.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_faces_packed_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const at::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int num_closest,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(
      num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
      "num_faces_per_mesh must have the same first dimension as mesh_to_faces_packed_first_idx");
  TORCH_CHECK(
      clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
      "clipped_faces_neighbor_idx must have the same first dimension as face_verts");
  // The per-pixel top-K buffer in the kernel has a fixed compile-time size.
  if (num_closest > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_faces_packed_first_idx_t{
          mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3},
      clipped_faces_neighbor_idx_t{
          clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4};
  at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
  at::checkAllSameGPU(
      c,
      {face_verts_t,
       mesh_to_faces_packed_first_idx_t,
       num_faces_per_mesh_t,
       clipped_faces_neighbor_idx_t});
  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  const int N = num_faces_per_mesh.size(0); // batch size.
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int K = num_closest;
  auto long_opts = num_faces_per_mesh.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  // -1 is the padding value for pixels with fewer than K covering faces.
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
  if (face_idxs.numel() == 0) {
    // Nothing to rasterize; still surface any earlier sticky CUDA error.
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
      blur_radius,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  // Catch launch-configuration errors (execution errors surface at the next
  // synchronizing call).
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
// Backward-pass kernel: one thread per pixel (grid-stride loop). For each of
// the K faces recorded for a pixel in pix_to_face, upstream gradients w.r.t.
// zbuf, barycentric coords and distances are propagated back to the nine
// vertex coordinates of that face and accumulated into grad_face_verts with
// atomicAdd (so accumulation order — and hence float rounding — is
// nondeterministic across runs).
__global__ void RasterizeMeshesBackwardCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int64_t* pix_to_face, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const int N,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_bary, // (N, H, W, K, 3)
    const float* grad_dists, // (N, H, W, K)
    float* grad_face_verts) { // (F, 3, 3)
  // Parallelize over each pixel in images of
  // size H * W, for each image in the batch of size N.
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
    // Convert linear index to 3D index
    const int n = t_i / (H * W); // batch index.
    const int pix_idx = t_i % (H * W);
    // Reverse ordering of X and Y axes.
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // Loop over all the faces for this pixel.
    for (int k = 0; k < K; k++) {
      // Index into (N, H, W, K, :) grad tensors
      // pixel index + top k index
      int i = n * H * W * K + pix_idx * K + k;
      const int f = pix_to_face[i];
      if (f < 0) {
        continue; // padded face.
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);
      // Only need xy for barycentric coordinate and distance calculations.
      const float2 v0xy = make_float2(v0.x, v0.y);
      const float2 v1xy = make_float2(v1.x, v1.y);
      const float2 v2xy = make_float2(v2.x, v2.y);
      // Get upstream gradients for the face.
      const float grad_dist_upstream = grad_dists[i];
      const float grad_zbuf_upstream = grad_zbuf[i];
      const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
      const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
      const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
      const float3 grad_bary_upstream = make_float3(
          grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);
      // Recompute the forward-pass barycentric chain so its intermediate
      // values are available for the backward formulas below.
      const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
      const float3 b_pp = !perspective_correct
          ? b_w
          : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);
      const float3 b_w_clip =
          !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);
      const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
      // Forward pass negated the distance for inside pixels, so mirror the
      // sign here.
      const float sign = inside ? -1.0f : 1.0f;
      auto grad_dist_f = PointTriangleDistanceBackward(
          pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
      const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
      const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
      const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);
      // Upstream gradient for barycentric coords from zbuf calculation:
      // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
      // Therefore
      // d_zbuf/d_bary_w0 = z0
      // d_zbuf/d_bary_w1 = z1
      // d_zbuf/d_bary_w2 = z2
      const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);
      // Total upstream barycentric gradients are the sum of
      // external upstream gradients and contribution from zbuf.
      const float3 grad_bary_f_sum =
          (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);
      float3 grad_bary0 = grad_bary_f_sum;
      if (clip_barycentric_coords) {
        grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
      }
      // z-gradient contributions from perspective correction (zero if off).
      float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
      if (perspective_correct) {
        auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
            b_w, v0.z, v1.z, v2.z, grad_bary0);
        grad_bary0 = thrust::get<0>(perspective_grads);
        dz0_persp = thrust::get<1>(perspective_grads);
        dz1_persp = thrust::get<2>(perspective_grads);
        dz2_persp = thrust::get<3>(perspective_grads);
      }
      auto grad_bary_f =
          BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
      const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
      const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
      const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);
      // Accumulate into the 9 floats of face f: (x, y, z) per vertex.
      atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
      atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
      atomicAdd(
          grad_face_verts + f * 9 + 2,
          grad_zbuf_upstream * b_w_clip.x + dz0_persp);
      atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
      atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
      atomicAdd(
          grad_face_verts + f * 9 + 5,
          grad_zbuf_upstream * b_w_clip.y + dz1_persp);
      atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
      atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
      atomicAdd(
          grad_face_verts + f * 9 + 8,
          grad_zbuf_upstream * b_w_clip.z + dz2_persp);
    }
  }
}
// Host entry point for the rasterization backward pass.
// Validates devices/dtypes, allocates a zero-initialized (F, 3, 3) gradient
// tensor, and launches RasterizeMeshesBackwardCudaKernel on the current
// stream. Returns grad_face_verts.
at::Tensor RasterizeMeshesBackwardCuda(
    const at::Tensor& face_verts, // (F, 3, 3)
    const at::Tensor& pix_to_face, // (N, H, W, K)
    const at::Tensor& grad_zbuf, // (N, H, W, K)
    const at::Tensor& grad_bary, // (N, H, W, K, 3)
    const at::Tensor& grad_dists, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords) {
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      pix_to_face_t{pix_to_face, "pix_to_face", 2},
      grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
      grad_bary_t{grad_bary, "grad_bary", 4},
      grad_dists_t{grad_dists, "grad_dists", 5};
  at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  at::checkAllSameType(
      c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  // This is nondeterministic because atomicAdd
  at::globalContext().alertNotDeterministic("RasterizeMeshesBackwardCuda");
  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  const int F = face_verts.size(0);
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);
  // Gradients must start at zero because the kernel accumulates with
  // atomicAdd.
  at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());
  if (grad_face_verts.numel() == 0) {
    // Nothing to do; still surface any earlier sticky CUDA error.
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_face_verts;
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      pix_to_face.contiguous().data_ptr<int64_t>(),
      perspective_correct,
      clip_barycentric_coords,
      N,
      H,
      W,
      K,
      grad_zbuf.contiguous().data_ptr<float>(),
      grad_bary.contiguous().data_ptr<float>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_face_verts.data_ptr<float>());
  // Catch launch-configuration errors.
  AT_CUDA_CHECK(cudaGetLastError());
  return grad_face_verts;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
// Fine rasterization kernel: like the naive kernel, but each pixel only
// tests the (at most M) faces listed for its bin in bin_faces — the bins
// are produced by a separate coarse pass. One thread per pixel via a
// grid-stride loop over N * BH * BW * bin_size^2 candidate pixel slots.
__global__ void RasterizeMeshesFineCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int32_t* bin_faces, // (N, BH, BW, T)
    const int64_t* clipped_faces_neighbor_idx, // (F,)
    const float blur_radius,
    const int bin_size,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int BH,
    const int BW,
    const int M,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary // (N, H, W, K, 3)
) {
  // This can be more than H * W if H or W are not divisible by bin_size.
  int num_pixels = N * BH * BW * bin_size * bin_size;
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from faces and bin_faces.
    int i = pid;
    const int n = i / (BH * BW * bin_size * bin_size);
    i %= BH * BW * bin_size * bin_size;
    // bin index y
    const int by = i / (BW * bin_size * bin_size);
    i %= BW * bin_size * bin_size;
    // bin index x
    const int bx = i / (bin_size * bin_size);
    // pixel within the bin
    i %= bin_size * bin_size;
    // Pixel x, y indices
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;
    // Skip padding slots in the last row/column of bins.
    if (yi >= H || xi >= W)
      continue;
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);
    // This part looks like the naive rasterization kernel, except we use
    // bin_faces to only look at a subset of faces already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; m++) {
      const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
      if (f < 0) {
        continue; // bin_faces uses -1 as a sentinal value.
      }
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          clipped_faces_neighbor_idx,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }
    // Now we've looked at all the faces for this bin, so we can write
    // output for the current pixel.
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // Reverse ordering of the X and Y axis so that
    // in the image +Y is pointing up and +X is pointing left.
    const int yidx = H - 1 - yi;
    const int xidx = W - 1 - xi;
    const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
    for (int k = 0; k < q_size; k++) {
      face_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist;
      bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
      bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
      bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host entry point for fine (binned) mesh rasterization.
// Validates inputs, allocates the four output tensors (filled with -1 as
// padding), and launches RasterizeMeshesFineCudaKernel on the current
// stream. Returns (face_idxs, zbuf, bary, pix_dists), each (N, H, W, K)
// with bary carrying a trailing dimension of 3.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
    const at::Tensor& face_verts,
    const at::Tensor& bin_faces,
    const at::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");
  TORCH_CHECK(
      clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
      "clipped_faces_neighbor_idx must have the same first dimension as face_verts");
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      bin_faces_t{bin_faces, "bin_faces", 2},
      clipped_faces_neighbor_idx_t{
          clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3};
  at::CheckedFrom c = "RasterizeMeshesFineCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, bin_faces_t, clipped_faces_neighbor_idx_t});
  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // bin_faces shape (N, BH, BW, M)
  const int N = bin_faces.size(0);
  const int BH = bin_faces.size(1);
  const int BW = bin_faces.size(2);
  const int M = bin_faces.size(3);
  const int K = faces_per_pixel;
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  // The per-pixel top-K buffer in the kernel has a fixed compile-time size;
  // report the actual limit rather than a stale hard-coded constant.
  if (K > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have faces_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  auto long_opts = bin_faces.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);
  // -1 is the padding value for pixels with fewer than K covering faces.
  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
  if (face_idxs.numel() == 0) {
    // Nothing to rasterize; still surface any earlier sticky CUDA error.
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }
  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      bin_faces.contiguous().data_ptr<int32_t>(),
      clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
      blur_radius,
      bin_size,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      BH,
      BW,
      M,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());
  // Catch launch-configuration errors; this was missing here although the
  // naive and backward entry points both check after launching.
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
|
21a20235c517950e14e0ca319e0efd9474a16529.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include <stdio.h>
#include <stdlib.h>
#include "Utilities.cuh"
using namespace cub;
/*******************************/
/* CUB BLOCKSORT KERNEL SHARED */
/*******************************/
// Per-block key sort with two value arrays permuted alongside (HIP/hipCUB).
// Each block handles BLOCK_THREADS * ITEMS_PER_THREAD elements: keys are
// staged into shared memory together with their original indices, sorted
// with BlockRadixSort, and the values are then gathered through the sorted
// index array. Launch with exactly BLOCK_THREADS threads per block.
// NOTE(review): SortBlockedToStriped leaves each thread's items in a striped
// arrangement, while values are gathered via the sorted original indices —
// verify that the keys and values written to global memory end up in the
// same arrangement before relying on element-wise correspondence.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void shared_BlockSortKernel(float *d_valuesA, float *d_valuesB, int *d_keys, float *d_values_resultA, float *d_values_resultB, int *d_keys_result)
{
    // --- Shared memory allocation
    __shared__ float sharedMemoryArrayValuesA[BLOCK_THREADS * ITEMS_PER_THREAD];
    __shared__ float sharedMemoryArrayValuesB[BLOCK_THREADS * ITEMS_PER_THREAD];
    __shared__ int sharedMemoryArrayKeys[BLOCK_THREADS * ITEMS_PER_THREAD];
    __shared__ int sharedMemoryHelperIndices[BLOCK_THREADS * ITEMS_PER_THREAD];
    // --- Specialize BlockStore and BlockRadixSort collective types
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD, int> BlockRadixSortT;
    // --- Allocate type-safe, repurposable shared memory for collectives
    __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    // First element of global memory handled by this block.
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    // --- Load data to shared memory
    for (int k = 0; k < ITEMS_PER_THREAD; k++) {
        sharedMemoryArrayValuesA [threadIdx.x * ITEMS_PER_THREAD + k] = d_valuesA[block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
        sharedMemoryArrayValuesB [threadIdx.x * ITEMS_PER_THREAD + k] = d_valuesB[block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
        sharedMemoryArrayKeys    [threadIdx.x * ITEMS_PER_THREAD + k] = d_keys   [block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
        sharedMemoryHelperIndices[threadIdx.x * ITEMS_PER_THREAD + k] = threadIdx.x * ITEMS_PER_THREAD + k        ;
    }
    __syncthreads();
    // --- Collectively sort the keys
    // The casts reinterpret each thread's contiguous slice of shared memory
    // as the per-thread item arrays BlockRadixSort expects, so the sort
    // happens in place; the paired index array records the permutation.
    BlockRadixSortT(temp_storage).SortBlockedToStriped(*static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryArrayKeys    + (threadIdx.x * ITEMS_PER_THREAD))),
                                                       *static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryHelperIndices + (threadIdx.x * ITEMS_PER_THREAD))));
    __syncthreads();
    // --- Write data to shared memory
    // (Values are gathered from their pre-sort positions via the permuted
    // index array; keys are copied from their post-sort positions.)
    for (int k = 0; k < ITEMS_PER_THREAD; k++) {
        d_values_resultA[block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArrayValuesA[sharedMemoryHelperIndices[threadIdx.x * ITEMS_PER_THREAD + k]];
        d_values_resultB[block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArrayValuesB[sharedMemoryHelperIndices[threadIdx.x * ITEMS_PER_THREAD + k]];
        d_keys_result   [block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArrayKeys   [threadIdx.x * ITEMS_PER_THREAD + k];
    }
}
/********/
/* MAIN */
/********/
// Demo driver: fills one key array and two value arrays with random data,
// sorts each sub-array's keys on the GPU with the shared-memory block-sort
// kernel (permuting both value arrays alongside), and prints input/output.
// Fix over the original: all host malloc() and device hipMalloc()
// allocations are now released before returning (they previously leaked).
int main() {
    const int numElemsPerArray  = 8;        // elements in each independently-sorted sub-array
    const int numArrays         = 4;        // number of sub-arrays (one block each)
    const int N                 = numArrays * numElemsPerArray;
    const int numElemsPerThread = 4;        // items handled per thread
    const int RANGE             = N * numElemsPerThread;   // range of the random data
    // --- Allocating and initializing the data on the host
    float *h_valuesA = (float *)malloc(N * sizeof(float));
    float *h_valuesB = (float *)malloc(N * sizeof(float));
    int *h_keys      = (int *)  malloc(N * sizeof(int));
    for (int i = 0 ; i < N; i++) {
        h_valuesA[i] = rand() % RANGE;
        h_valuesB[i] = rand() % RANGE;
        h_keys[i]    = rand() % RANGE;
    }
    printf("Original\n\n");
    for (int k = 0; k < numArrays; k++)
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Key %i; Value A %f; Value B %f\n", k, i, h_keys[k * numElemsPerArray + i], h_valuesA[k * numElemsPerArray + i], h_valuesB[k * numElemsPerArray + i]);
    // --- Allocating the results on the host
    float *h_values_resultA = (float *)malloc(N * sizeof(float));
    float *h_values_resultB = (float *)malloc(N * sizeof(float));
    float *h_values_result2 = (float *)malloc(N * sizeof(float));
    int *h_keys_result1     = (int *)  malloc(N * sizeof(int));
    int *h_keys_result2     = (int *)  malloc(N * sizeof(int));
    // --- Allocating space for data and results on device
    float *d_valuesA;        gpuErrchk(hipMalloc((void **)&d_valuesA,        N * sizeof(float)));
    float *d_valuesB;        gpuErrchk(hipMalloc((void **)&d_valuesB,        N * sizeof(float)));
    int   *d_keys;           gpuErrchk(hipMalloc((void **)&d_keys,           N * sizeof(int)));
    float *d_values_resultA; gpuErrchk(hipMalloc((void **)&d_values_resultA, N * sizeof(float)));
    float *d_values_resultB; gpuErrchk(hipMalloc((void **)&d_values_resultB, N * sizeof(float)));
    float *d_values_result2; gpuErrchk(hipMalloc((void **)&d_values_result2, N * sizeof(float)));
    int   *d_keys_result1;   gpuErrchk(hipMalloc((void **)&d_keys_result1,   N * sizeof(int)));
    int   *d_keys_result2;   gpuErrchk(hipMalloc((void **)&d_keys_result2,   N * sizeof(int)));
    // --- BlockSortKernel with shared
    gpuErrchk(hipMemcpy(d_valuesA, h_valuesA, N * sizeof(float), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_valuesB, h_valuesB, N * sizeof(float), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(d_keys,    h_keys,    N * sizeof(int),   hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( shared_BlockSortKernel<N / numArrays / numElemsPerThread, numElemsPerThread>), dim3(numArrays), dim3(numElemsPerArray / numElemsPerThread), 0, 0, d_valuesA, d_valuesB, d_keys, d_values_resultA, d_values_resultB, d_keys_result1);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    gpuErrchk(hipMemcpy(h_values_resultA, d_values_resultA, N * sizeof(float), hipMemcpyDeviceToHost));
    gpuErrchk(hipMemcpy(h_values_resultB, d_values_resultB, N * sizeof(float), hipMemcpyDeviceToHost));
    gpuErrchk(hipMemcpy(h_keys_result1,   d_keys_result1,   N * sizeof(int),   hipMemcpyDeviceToHost));
    printf("\n\nBlockSortKernel using shared memory\n\n");
    for (int k = 0; k < numArrays; k++)
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Key %i; Value %f; Value %f\n", k, i, h_keys_result1[k * numElemsPerArray + i], h_values_resultA[k * numElemsPerArray + i], h_values_resultB[k * numElemsPerArray + i]);
    // --- Cleanup: release device and host allocations (previously leaked).
    gpuErrchk(hipFree(d_valuesA));
    gpuErrchk(hipFree(d_valuesB));
    gpuErrchk(hipFree(d_keys));
    gpuErrchk(hipFree(d_values_resultA));
    gpuErrchk(hipFree(d_values_resultB));
    gpuErrchk(hipFree(d_values_result2));
    gpuErrchk(hipFree(d_keys_result1));
    gpuErrchk(hipFree(d_keys_result2));
    free(h_valuesA);
    free(h_valuesB);
    free(h_keys);
    free(h_values_resultA);
    free(h_values_resultB);
    free(h_values_result2);
    free(h_keys_result1);
    free(h_keys_result2);
    return 0;
}
| 21a20235c517950e14e0ca319e0efd9474a16529.cu | #include <cub/cub.cuh>
#include <stdio.h>
#include <stdlib.h>
#include "Utilities.cuh"
using namespace cub;
/*******************************/
/* CUB BLOCKSORT KERNEL SHARED */
/*******************************/
// CUDA variant of the shared-memory block sort: each block sorts its own
// BLOCK_THREADS * ITEMS_PER_THREAD keys with cub::BlockRadixSort and uses a
// co-sorted index array to permute the two value arrays to match.
// Requires the kernel to be launched with BLOCK_THREADS threads per block.
// NOTE(review): the sort is SortBlockedToStriped, so keys are produced in a
// striped per-thread arrangement while values are gathered via the sorted
// original indices — confirm the two outputs share one arrangement before
// assuming element-wise key/value correspondence in global memory.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void shared_BlockSortKernel(float *d_valuesA, float *d_valuesB, int *d_keys, float *d_values_resultA, float *d_values_resultB, int *d_keys_result)
{
    // --- Shared memory allocation
    __shared__ float sharedMemoryArrayValuesA[BLOCK_THREADS * ITEMS_PER_THREAD];
    __shared__ float sharedMemoryArrayValuesB[BLOCK_THREADS * ITEMS_PER_THREAD];
    __shared__ int sharedMemoryArrayKeys[BLOCK_THREADS * ITEMS_PER_THREAD];
    __shared__ int sharedMemoryHelperIndices[BLOCK_THREADS * ITEMS_PER_THREAD];
    // --- Specialize BlockStore and BlockRadixSort collective types
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD, int> BlockRadixSortT;
    // --- Allocate type-safe, repurposable shared memory for collectives
    __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    // Offset of this block's slice of the global arrays.
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    // --- Load data to shared memory
    for (int k = 0; k < ITEMS_PER_THREAD; k++) {
        sharedMemoryArrayValuesA [threadIdx.x * ITEMS_PER_THREAD + k] = d_valuesA[block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
        sharedMemoryArrayValuesB [threadIdx.x * ITEMS_PER_THREAD + k] = d_valuesB[block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
        sharedMemoryArrayKeys    [threadIdx.x * ITEMS_PER_THREAD + k] = d_keys   [block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
        sharedMemoryHelperIndices[threadIdx.x * ITEMS_PER_THREAD + k] = threadIdx.x * ITEMS_PER_THREAD + k        ;
    }
    __syncthreads();
    // --- Collectively sort the keys
    // Each thread's slice of shared memory is reinterpreted as the fixed-size
    // per-thread item array the CUB API expects, so sorting happens in place
    // and the paired index array captures the applied permutation.
    BlockRadixSortT(temp_storage).SortBlockedToStriped(*static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryArrayKeys    + (threadIdx.x * ITEMS_PER_THREAD))),
                                                       *static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryHelperIndices + (threadIdx.x * ITEMS_PER_THREAD))));
    __syncthreads();
    // --- Write data to shared memory
    // Values are gathered from their original positions via the permuted
    // indices; keys are copied from their post-sort positions.
    for (int k = 0; k < ITEMS_PER_THREAD; k++) {
        d_values_resultA[block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArrayValuesA[sharedMemoryHelperIndices[threadIdx.x * ITEMS_PER_THREAD + k]];
        d_values_resultB[block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArrayValuesB[sharedMemoryHelperIndices[threadIdx.x * ITEMS_PER_THREAD + k]];
        d_keys_result   [block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArrayKeys   [threadIdx.x * ITEMS_PER_THREAD + k];
    }
}
/********/
/* MAIN */
/********/
// Demo driver: generates random (key, valueA, valueB) triples, sorts each of
// numArrays segments independently on the GPU via shared_BlockSortKernel, and
// prints the data before and after sorting.
//
// Fixes over the original: all host and device allocations are released
// (everything was leaked), and the never-used "*_result2" buffers are gone.
// Returns 0 on success; any CUDA failure aborts through gpuErrchk.
int main() {

    const int numElemsPerArray  = 8;   // elements per independent segment
    const int numArrays         = 4;   // number of segments (one block each)
    const int N                 = numArrays * numElemsPerArray;
    const int numElemsPerThread = 4;   // ITEMS_PER_THREAD for the kernel
    const int RANGE             = N * numElemsPerThread;   // value range for the random test data

    // --- Allocating and initializing the data on the host
    float *h_valuesA = (float *)malloc(N * sizeof(float));
    float *h_valuesB = (float *)malloc(N * sizeof(float));
    int   *h_keys    = (int *)  malloc(N * sizeof(int));
    for (int i = 0 ; i < N; i++) {
        h_valuesA[i] = rand() % RANGE;
        h_valuesB[i] = rand() % RANGE;
        h_keys[i]    = rand() % RANGE;
    }

    printf("Original\n\n");
    for (int k = 0; k < numArrays; k++)
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Key %i; Value A %f; Value B %f\n", k, i, h_keys[k * numElemsPerArray + i], h_valuesA[k * numElemsPerArray + i], h_valuesB[k * numElemsPerArray + i]);

    // --- Allocating the results on the host
    float *h_values_resultA = (float *)malloc(N * sizeof(float));
    float *h_values_resultB = (float *)malloc(N * sizeof(float));
    int   *h_keys_result1   = (int *)  malloc(N * sizeof(int));

    // --- Allocating space for data and results on device
    float *d_valuesA;        gpuErrchk(cudaMalloc((void **)&d_valuesA,        N * sizeof(float)));
    float *d_valuesB;        gpuErrchk(cudaMalloc((void **)&d_valuesB,        N * sizeof(float)));
    int   *d_keys;           gpuErrchk(cudaMalloc((void **)&d_keys,           N * sizeof(int)));
    float *d_values_resultA; gpuErrchk(cudaMalloc((void **)&d_values_resultA, N * sizeof(float)));
    float *d_values_resultB; gpuErrchk(cudaMalloc((void **)&d_values_resultB, N * sizeof(float)));
    int   *d_keys_result1;   gpuErrchk(cudaMalloc((void **)&d_keys_result1,   N * sizeof(int)));

    // --- BlockSortKernel with shared
    gpuErrchk(cudaMemcpy(d_valuesA, h_valuesA, N * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_valuesB, h_valuesB, N * sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_keys,    h_keys,    N * sizeof(int),   cudaMemcpyHostToDevice));
    // One block per segment; each thread owns numElemsPerThread items.
    shared_BlockSortKernel<N / numArrays / numElemsPerThread, numElemsPerThread><<<numArrays, numElemsPerArray / numElemsPerThread>>>(d_valuesA, d_valuesB, d_keys, d_values_resultA, d_values_resultB, d_keys_result1);
    gpuErrchk(cudaPeekAtLastError());       // catch launch-configuration errors
    gpuErrchk(cudaDeviceSynchronize());     // surface any in-kernel faults
    gpuErrchk(cudaMemcpy(h_values_resultA, d_values_resultA, N * sizeof(float), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_values_resultB, d_values_resultB, N * sizeof(float), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_keys_result1,   d_keys_result1,   N * sizeof(int),   cudaMemcpyDeviceToHost));

    printf("\n\nBlockSortKernel using shared memory\n\n");
    for (int k = 0; k < numArrays; k++)
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Key %i; Value %f; Value %f\n", k, i, h_keys_result1[k * numElemsPerArray + i], h_values_resultA[k * numElemsPerArray + i], h_values_resultB[k * numElemsPerArray + i]);

    // --- Release device and host memory (the original leaked all of it)
    gpuErrchk(cudaFree(d_valuesA));
    gpuErrchk(cudaFree(d_valuesB));
    gpuErrchk(cudaFree(d_keys));
    gpuErrchk(cudaFree(d_values_resultA));
    gpuErrchk(cudaFree(d_values_resultB));
    gpuErrchk(cudaFree(d_keys_result1));
    free(h_valuesA); free(h_valuesB); free(h_keys);
    free(h_values_resultA); free(h_values_resultB); free(h_keys_result1);

    return 0;
}
|
f9ed621ce3a6de6e281c15b951f0958e81b3b93d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <silo.h>
#include "sub_grid.hpp"
#include "cpu.hpp"
#include "gpu.hpp"
// Evaluates the hydrodynamic RHS (dU) for this sub-grid at Runge-Kutta stage
// rk and, when rk == 0, returns the maximum signal speed found so the caller
// can choose a stable time step; later stages return 0.
//
// policy == CPU runs hydro_cpu_kernel on host data; policy == GPU stages the
// data through thrust device vectors, launches hydro_gpu_kernel with one
// WARP_SIZE-wide block per interior x-strip, and copies dU back.
//
// NOTE(review): the GPU grid assumes (nx - 2*BW) is a multiple of WARP_SIZE —
// confirm with callers.
real sub_grid::hydro_kernel(sub_grid_exec_policy policy, int rk) {
	// Initialized so an unexpected policy value cannot return indeterminate
	// garbage (the original left amax uninitialized outside the two cases).
	real amax = 0.0;
	switch (policy) {
	case CPU: {
		thrust::host_vector<real> a(size, 0.0);
		hydro_cpu_kernel(U.data(), dU.data(), a.data(), nx, ny, nz, dx, dy, dz, rk);
		if (rk == 0) {
			// The wave-speed array is only needed on the first stage.
			amax = *thrust::max_element(a.begin(), a.end());
		}
		break;
	}
	case GPU: {
		dim3 threads(WARP_SIZE);
		dim3 blocks((nx - 2 * BW) / WARP_SIZE, ny - 2 * BW, nz - 2 * BW);
		thrust::device_vector<real> gpu_U = U;
		thrust::device_vector<state_var<real>> gpu_dU = dU;
		if (rk == 0) {
			thrust::device_vector<real> gpu_a(size, 0.0);
			hipLaunchKernelGGL(( hydro_gpu_kernel), dim3(blocks),dim3(threads), 0, 0, gpu_U.data().get(), gpu_dU.data().get(), gpu_a.data().get(), nx, ny, nz, dx, dy, dz, rk );
			amax = *thrust::max_element(gpu_a.begin(), gpu_a.end());
		} else {
			// No wave-speed output needed after the first stage.
			hipLaunchKernelGGL(( hydro_gpu_kernel), dim3(blocks),dim3(threads), 0, 0, gpu_U.data().get(), gpu_dU.data().get(), nullptr, nx, ny, nz, dx, dy, dz, rk );
		}
		dU = gpu_dU;   // device-to-host copy; synchronizes with the kernel
		break;
	}
	}
	return amax;
}
// Advances the conserved variables U by dt using the accumulated RHS dU for
// Runge-Kutta stage rk, on the backend selected by policy. The GPU path
// copies U and dU to the device, updates them there, and copies both back;
// the thrust copy-back synchronizes, so no explicit device sync is needed.
void sub_grid::hydro_compute_u(sub_grid_exec_policy policy, real dt, int rk) {
	switch (policy) {
	case CPU:
		hydro_cpu_compute_u(U.data(), dU.data(), nx, ny, nz, dt, rk);
		break;
	case GPU: {
		// Braced scope: this case declares locals with non-trivial
		// initialization, which must not remain in scope for other labels.
		dim3 threads(nx - 2 * BW);
		dim3 blocks(ny - 2 * BW, nz - 2 * BW);
		thrust::device_vector<real> gpu_U = U;
		thrust::device_vector<state_var<real>> gpu_dU = dU;
		hipLaunchKernelGGL(( hydro_gpu_compute_u), dim3(blocks),dim3(threads), 0, 0, gpu_U.data().get(), gpu_dU.data().get(), nx, ny, nz, dt, rk);
		U = gpu_U;
		dU = gpu_dU;
		break;
	}
	}
}
// Flattens 3-D cell coordinates (i, j, k) into the linear storage index,
// with x varying fastest, then y, then z.
int sub_grid::index(int i, int j, int k) const {
	const int slab = dims[YDIM] * k + j;   // combined y/z offset in units of rows
	return dims[XDIM] * slab + i;
}
// Cell-centered physical x coordinate of column i (cells have width dx).
real sub_grid::x(int i) const {
	return (real(i) + 0.5) * dx;
}
// Cell-centered physical y coordinate of row i (cells have height dy).
real sub_grid::y(int i) const {
	return (real(i) + 0.5) * dy;
}
// Cell-centered physical z coordinate of slab i (cells have depth dz).
real sub_grid::z(int i) const {
	return (real(i) + 0.5) * dz;
}
// Constructs a sub-grid of _nx x _ny x _nz cells spanning the given physical
// extents. Storage: U holds NF conserved fields per cell; dU holds one
// state_var record per cell. fgamma is fixed at the monatomic ideal-gas 5/3.
// nx..nz and dx..dz are initialized from dims/dX entries — presumably
// convenience aliases; member kinds are declared in sub_grid.hpp (TODO confirm).
sub_grid::sub_grid(int _nx, int _ny, int _nz, double spanx, double spany, double spanz) :
fgamma(5.0 / 3.0), size(_nx * _ny * _nz), dims( { _nx, _ny, _nz }), dX( { spanx, spany, spanz }), U(NF * size), dU(size), nx(dims[XDIM]), ny(
dims[YDIM]), nz(dims[ZDIM]), dx(dX[XDIM]), dy(dX[YDIM]), dz(dX[ZDIM]) {
}
// No manually managed resources: the thrust containers release themselves.
sub_grid::~sub_grid() {
}
| f9ed621ce3a6de6e281c15b951f0958e81b3b93d.cu | #include <silo.h>
#include "sub_grid.hpp"
#include "cpu.hpp"
#include "gpu.hpp"
// Evaluates the hydrodynamic RHS (dU) for this sub-grid at Runge-Kutta stage
// rk and, when rk == 0, returns the maximum signal speed found so the caller
// can choose a stable time step; later stages return 0.
//
// policy == CPU runs hydro_cpu_kernel on host data; policy == GPU stages the
// data through thrust device vectors, launches hydro_gpu_kernel with one
// WARP_SIZE-wide block per interior x-strip, and copies dU back.
//
// NOTE(review): the GPU grid assumes (nx - 2*BW) is a multiple of WARP_SIZE —
// confirm with callers.
real sub_grid::hydro_kernel(sub_grid_exec_policy policy, int rk) {
	// Initialized so an unexpected policy value cannot return indeterminate
	// garbage (the original left amax uninitialized outside the two cases).
	real amax = 0.0;
	switch (policy) {
	case CPU: {
		thrust::host_vector<real> a(size, 0.0);
		hydro_cpu_kernel(U.data(), dU.data(), a.data(), nx, ny, nz, dx, dy, dz, rk);
		if (rk == 0) {
			// The wave-speed array is only needed on the first stage.
			amax = *thrust::max_element(a.begin(), a.end());
		}
		break;
	}
	case GPU: {
		dim3 threads(WARP_SIZE);
		dim3 blocks((nx - 2 * BW) / WARP_SIZE, ny - 2 * BW, nz - 2 * BW);
		thrust::device_vector<real> gpu_U = U;
		thrust::device_vector<state_var<real>> gpu_dU = dU;
		if (rk == 0) {
			thrust::device_vector<real> gpu_a(size, 0.0);
			hydro_gpu_kernel<<<blocks,threads>>>(gpu_U.data().get(), gpu_dU.data().get(), gpu_a.data().get(), nx, ny, nz, dx, dy, dz, rk );
			amax = *thrust::max_element(gpu_a.begin(), gpu_a.end());
		} else {
			// No wave-speed output needed after the first stage.
			hydro_gpu_kernel<<<blocks,threads>>>(gpu_U.data().get(), gpu_dU.data().get(), nullptr, nx, ny, nz, dx, dy, dz, rk );
		}
		dU = gpu_dU;   // device-to-host copy; synchronizes with the kernel
		break;
	}
	}
	return amax;
}
// Advances the conserved variables U by dt using the accumulated RHS dU for
// Runge-Kutta stage rk, on the backend selected by policy. The GPU path
// copies U and dU to the device, updates them there, and copies both back;
// the thrust copy-back synchronizes, so no explicit device sync is needed.
void sub_grid::hydro_compute_u(sub_grid_exec_policy policy, real dt, int rk) {
	switch (policy) {
	case CPU:
		hydro_cpu_compute_u(U.data(), dU.data(), nx, ny, nz, dt, rk);
		break;
	case GPU: {
		// Braced scope: this case declares locals with non-trivial
		// initialization, which must not remain in scope for other labels.
		dim3 threads(nx - 2 * BW);
		dim3 blocks(ny - 2 * BW, nz - 2 * BW);
		thrust::device_vector<real> gpu_U = U;
		thrust::device_vector<state_var<real>> gpu_dU = dU;
		hydro_gpu_compute_u<<<blocks,threads>>>(gpu_U.data().get(), gpu_dU.data().get(), nx, ny, nz, dt, rk);
		U = gpu_U;
		dU = gpu_dU;
		break;
	}
	}
}
// Maps 3-D cell coordinates (i, j, k) to the flat storage offset, x fastest.
int sub_grid::index(int i, int j, int k) const {
	int offset = k;                        // slowest axis
	offset = offset * dims[YDIM] + j;      // fold in y
	offset = offset * dims[XDIM] + i;      // fold in x (fastest axis)
	return offset;
}
// Physical x coordinate of the center of column i (cell width dx).
real sub_grid::x(int i) const {
	const real center = real(i) + 0.5;   // cell midpoint in index units
	return center * dx;
}
// Physical y coordinate of the center of row i (cell height dy).
real sub_grid::y(int i) const {
	const real center = real(i) + 0.5;
	return center * dy;
}
// Physical z coordinate of the center of slab i (cell depth dz).
real sub_grid::z(int i) const {
	const real center = real(i) + 0.5;
	return center * dz;
}
// Constructs a sub-grid of _nx x _ny x _nz cells spanning the given physical
// extents. Storage: U holds NF conserved fields per cell; dU holds one
// state_var record per cell. fgamma is fixed at the monatomic ideal-gas 5/3.
// nx..nz and dx..dz are initialized from dims/dX entries — presumably
// convenience aliases; member kinds are declared in sub_grid.hpp (TODO confirm).
sub_grid::sub_grid(int _nx, int _ny, int _nz, double spanx, double spany, double spanz) :
fgamma(5.0 / 3.0), size(_nx * _ny * _nz), dims( { _nx, _ny, _nz }), dX( { spanx, spany, spanz }), U(NF * size), dU(size), nx(dims[XDIM]), ny(
dims[YDIM]), nz(dims[ZDIM]), dx(dX[XDIM]), dy(dX[YDIM]), dz(dX[ZDIM]) {
}
// No manually managed resources: the thrust containers release themselves.
sub_grid::~sub_grid() {
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.